2024-11-07 14:17:19,180 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-07 14:17:19,197 main DEBUG Took 0.014600 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-07 14:17:19,197 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-07 14:17:19,198 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-07 14:17:19,199 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-07 14:17:19,201 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 14:17:19,211 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-07 14:17:19,224 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 14:17:19,226 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 14:17:19,227 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 14:17:19,227 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 14:17:19,228 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 14:17:19,228 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 14:17:19,229 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 14:17:19,230 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 14:17:19,230 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 14:17:19,231 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 14:17:19,232 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 14:17:19,232 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 14:17:19,233 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 14:17:19,233 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-07 14:17:19,234 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 14:17:19,234 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 14:17:19,235 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 14:17:19,236 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 14:17:19,236 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 14:17:19,237 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 14:17:19,237 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 14:17:19,238 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 14:17:19,239 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 14:17:19,239 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 14:17:19,240 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 14:17:19,240 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-07 14:17:19,242 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 14:17:19,244 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-07 14:17:19,247 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-07 14:17:19,248 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-07 14:17:19,249 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-07 14:17:19,250 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-07 14:17:19,261 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-07 14:17:19,263 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-07 14:17:19,265 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-07 14:17:19,266 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-07 14:17:19,266 main DEBUG createAppenders(={Console}) 2024-11-07 14:17:19,267 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-11-07 14:17:19,267 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-07 14:17:19,267 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-11-07 14:17:19,268 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-07 14:17:19,268 main DEBUG OutputStream closed 2024-11-07 14:17:19,268 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-07 14:17:19,268 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-07 14:17:19,269 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-11-07 14:17:19,342 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-07 14:17:19,344 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-07 14:17:19,345 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-07 14:17:19,346 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-07 14:17:19,347 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-07 14:17:19,347 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-07 14:17:19,348 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-07 14:17:19,348 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-07 14:17:19,348 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-07 14:17:19,348 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-07 14:17:19,349 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-07 14:17:19,349 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-07 14:17:19,349 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-07 14:17:19,350 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-07 14:17:19,350 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-07 14:17:19,350 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-07 14:17:19,351 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-07 14:17:19,351 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-07 14:17:19,354 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-07 14:17:19,354 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-11-07 14:17:19,354 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-07 14:17:19,355 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-11-07T14:17:19,592 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e 2024-11-07 14:17:19,595 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-07 14:17:19,596 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-07T14:17:19,605 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithBasicPolicy timeout: 13 mins 2024-11-07T14:17:19,624 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-07T14:17:19,627 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/cluster_f3a4e477-7868-e1b1-52ac-86c06ee32671, deleteOnExit=true 2024-11-07T14:17:19,627 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-11-07T14:17:19,628 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/test.cache.data in system properties and HBase conf 2024-11-07T14:17:19,628 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/hadoop.tmp.dir in system properties and HBase conf 2024-11-07T14:17:19,629 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/hadoop.log.dir in system properties and HBase conf 2024-11-07T14:17:19,630 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-07T14:17:19,630 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-07T14:17:19,630 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-11-07T14:17:19,728 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-07T14:17:19,831 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-07T14:17:19,834 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-07T14:17:19,835 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-07T14:17:19,835 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-07T14:17:19,836 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-07T14:17:19,836 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-07T14:17:19,837 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-07T14:17:19,837 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-07T14:17:19,837 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T14:17:19,838 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-07T14:17:19,838 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/nfs.dump.dir in system properties and HBase conf 2024-11-07T14:17:19,839 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/java.io.tmpdir in system properties and HBase conf 2024-11-07T14:17:19,839 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T14:17:19,839 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-07T14:17:19,840 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-07T14:17:20,703 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-07T14:17:20,789 INFO [Time-limited test {}] log.Log(170): Logging initialized @2331ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-07T14:17:20,885 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T14:17:20,953 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T14:17:20,974 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T14:17:20,974 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T14:17:20,976 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T14:17:20,989 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T14:17:20,992 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/hadoop.log.dir/,AVAILABLE} 2024-11-07T14:17:20,993 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T14:17:21,196 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/java.io.tmpdir/jetty-localhost-34883-hadoop-hdfs-3_4_1-tests_jar-_-any-11950108252492970367/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-07T14:17:21,205 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:34883} 2024-11-07T14:17:21,205 INFO [Time-limited test {}] server.Server(415): Started @2748ms 2024-11-07T14:17:21,584 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T14:17:21,591 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T14:17:21,593 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T14:17:21,593 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T14:17:21,593 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T14:17:21,594 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/hadoop.log.dir/,AVAILABLE} 2024-11-07T14:17:21,594 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T14:17:21,714 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1f79ec76{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/java.io.tmpdir/jetty-localhost-37825-hadoop-hdfs-3_4_1-tests_jar-_-any-1610566289254913189/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T14:17:21,715 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:37825} 2024-11-07T14:17:21,715 INFO [Time-limited test {}] server.Server(415): Started @3258ms 2024-11-07T14:17:21,778 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-07T14:17:22,228 WARN [Thread-70 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/cluster_f3a4e477-7868-e1b1-52ac-86c06ee32671/dfs/data/data2/current/BP-551066518-172.17.0.2-1730989040460/current, will proceed with Du for space computation calculation, 2024-11-07T14:17:22,228 WARN [Thread-69 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/cluster_f3a4e477-7868-e1b1-52ac-86c06ee32671/dfs/data/data1/current/BP-551066518-172.17.0.2-1730989040460/current, will proceed with Du for space computation calculation, 2024-11-07T14:17:22,272 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-07T14:17:22,343 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x901fc126ed430a9e with lease ID 0xf018677f0c1c0a25: Processing first storage report for DS-80eb1fae-424d-47c9-a968-ddde82085679 from datanode DatanodeRegistration(127.0.0.1:34705, datanodeUuid=1bdab903-9c17-4603-9ac5-a031956640f6, infoPort=44351, infoSecurePort=0, ipcPort=41073, storageInfo=lv=-57;cid=testClusterID;nsid=945772960;c=1730989040460) 2024-11-07T14:17:22,344 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x901fc126ed430a9e with lease ID 0xf018677f0c1c0a25: from storage DS-80eb1fae-424d-47c9-a968-ddde82085679 node DatanodeRegistration(127.0.0.1:34705, datanodeUuid=1bdab903-9c17-4603-9ac5-a031956640f6, infoPort=44351, infoSecurePort=0, ipcPort=41073, storageInfo=lv=-57;cid=testClusterID;nsid=945772960;c=1730989040460), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-07T14:17:22,344 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x901fc126ed430a9e with lease ID 0xf018677f0c1c0a25: Processing first storage report for DS-a478517b-f666-4233-b780-5c3474bb9bab from datanode DatanodeRegistration(127.0.0.1:34705, datanodeUuid=1bdab903-9c17-4603-9ac5-a031956640f6, infoPort=44351, infoSecurePort=0, ipcPort=41073, storageInfo=lv=-57;cid=testClusterID;nsid=945772960;c=1730989040460) 2024-11-07T14:17:22,345 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x901fc126ed430a9e with lease ID 0xf018677f0c1c0a25: from storage DS-a478517b-f666-4233-b780-5c3474bb9bab node DatanodeRegistration(127.0.0.1:34705, datanodeUuid=1bdab903-9c17-4603-9ac5-a031956640f6, infoPort=44351, infoSecurePort=0, ipcPort=41073, storageInfo=lv=-57;cid=testClusterID;nsid=945772960;c=1730989040460), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T14:17:22,409 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e 
2024-11-07T14:17:22,487 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/cluster_f3a4e477-7868-e1b1-52ac-86c06ee32671/zookeeper_0, clientPort=51818, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/cluster_f3a4e477-7868-e1b1-52ac-86c06ee32671/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/cluster_f3a4e477-7868-e1b1-52ac-86c06ee32671/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-07T14:17:22,498 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=51818 2024-11-07T14:17:22,511 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T14:17:22,515 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T14:17:22,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741825_1001 (size=7) 2024-11-07T14:17:23,160 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8 with version=8 2024-11-07T14:17:23,160 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/hbase-staging 2024-11-07T14:17:23,298 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-07T14:17:23,584 INFO [Time-limited test {}] client.ConnectionUtils(129): master/69430dbfd73f:0 server-side Connection retries=45 2024-11-07T14:17:23,605 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T14:17:23,605 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T14:17:23,606 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T14:17:23,606 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T14:17:23,606 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T14:17:23,768 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-07T14:17:23,831 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-07T14:17:23,841 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-07T14:17:23,845 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T14:17:23,874 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 12214 (auto-detected) 2024-11-07T14:17:23,875 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-07T14:17:23,895 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:40909 2024-11-07T14:17:23,904 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T14:17:23,906 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T14:17:23,919 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:40909 connecting to ZooKeeper ensemble=127.0.0.1:51818 2024-11-07T14:17:23,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:409090x0, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T14:17:23,953 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40909-0x1018318208e0000 connected 2024-11-07T14:17:23,986 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-07T14:17:23,989 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T14:17:23,991 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T14:17:23,996 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40909 2024-11-07T14:17:23,996 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40909 2024-11-07T14:17:23,997 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40909 2024-11-07T14:17:24,001 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40909 2024-11-07T14:17:24,002 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40909 
2024-11-07T14:17:24,010 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8, hbase.cluster.distributed=false 2024-11-07T14:17:24,082 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/69430dbfd73f:0 server-side Connection retries=45 2024-11-07T14:17:24,083 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T14:17:24,083 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T14:17:24,083 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T14:17:24,083 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T14:17:24,083 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T14:17:24,086 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-07T14:17:24,089 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T14:17:24,090 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:45917 2024-11-07T14:17:24,092 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-07T14:17:24,098 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-07T14:17:24,100 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T14:17:24,104 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T14:17:24,109 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:45917 connecting to ZooKeeper ensemble=127.0.0.1:51818 2024-11-07T14:17:24,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:459170x0, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T14:17:24,114 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45917-0x1018318208e0001 connected 2024-11-07T14:17:24,114 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45917-0x1018318208e0001, quorum=127.0.0.1:51818, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-07T14:17:24,116 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45917-0x1018318208e0001, 
quorum=127.0.0.1:51818, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T14:17:24,117 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45917-0x1018318208e0001, quorum=127.0.0.1:51818, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T14:17:24,117 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45917 2024-11-07T14:17:24,118 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45917 2024-11-07T14:17:24,118 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45917 2024-11-07T14:17:24,119 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45917 2024-11-07T14:17:24,120 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45917 2024-11-07T14:17:24,122 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/69430dbfd73f,40909,1730989043291 2024-11-07T14:17:24,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45917-0x1018318208e0001, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T14:17:24,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T14:17:24,132 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/69430dbfd73f,40909,1730989043291 2024-11-07T14:17:24,140 DEBUG [M:0;69430dbfd73f:40909 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;69430dbfd73f:40909 2024-11-07T14:17:24,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45917-0x1018318208e0001, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-07T14:17:24,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-07T14:17:24,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45917-0x1018318208e0001, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T14:17:24,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T14:17:24,154 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-07T14:17:24,155 DEBUG [zk-event-processor-pool-0 
{}] zookeeper.ZKUtil(111): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-07T14:17:24,155 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/69430dbfd73f,40909,1730989043291 from backup master directory 2024-11-07T14:17:24,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/69430dbfd73f,40909,1730989043291 2024-11-07T14:17:24,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45917-0x1018318208e0001, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T14:17:24,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T14:17:24,159 WARN [master/69430dbfd73f:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-07T14:17:24,159 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=69430dbfd73f,40909,1730989043291 2024-11-07T14:17:24,162 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-07T14:17:24,163 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-07T14:17:24,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741826_1002 (size=42) 2024-11-07T14:17:24,633 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/hbase.id with ID: 74151dd5-bc79-44a6-a920-eee3c8d4b757 2024-11-07T14:17:24,674 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T14:17:24,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T14:17:24,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45917-0x1018318208e0001, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T14:17:24,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741827_1003 (size=196) 2024-11-07T14:17:25,133 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, 
{NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T14:17:25,136 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-07T14:17:25,153 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:25,157 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-07T14:17:25,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741828_1004 (size=1189) 2024-11-07T14:17:25,606 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/MasterData/data/master/store 2024-11-07T14:17:25,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741829_1005 (size=34) 2024-11-07T14:17:25,627 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-11-07T14:17:25,627 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T14:17:25,629 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-07T14:17:25,629 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T14:17:25,629 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T14:17:25,629 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-07T14:17:25,629 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T14:17:25,630 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T14:17:25,630 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-07T14:17:25,632 WARN [master/69430dbfd73f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/MasterData/data/master/store/.initializing 2024-11-07T14:17:25,632 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/MasterData/WALs/69430dbfd73f,40909,1730989043291 2024-11-07T14:17:25,638 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-07T14:17:25,649 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=69430dbfd73f%2C40909%2C1730989043291, suffix=, logDir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/MasterData/WALs/69430dbfd73f,40909,1730989043291, archiveDir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/MasterData/oldWALs, maxLogs=10 2024-11-07T14:17:25,674 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(600): When create output stream for /user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/MasterData/WALs/69430dbfd73f,40909,1730989043291/69430dbfd73f%2C40909%2C1730989043291.1730989045655, exclude list is [], retry=0 2024-11-07T14:17:25,691 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34705,DS-80eb1fae-424d-47c9-a968-ddde82085679,DISK] 2024-11-07T14:17:25,694 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-11-07T14:17:25,733 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/MasterData/WALs/69430dbfd73f,40909,1730989043291/69430dbfd73f%2C40909%2C1730989043291.1730989045655 2024-11-07T14:17:25,734 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44351:44351)] 2024-11-07T14:17:25,735 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-07T14:17:25,735 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T14:17:25,739 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T14:17:25,740 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T14:17:25,780 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T14:17:25,805 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-07T14:17:25,809 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:17:25,812 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T14:17:25,812 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T14:17:25,816 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-07T14:17:25,816 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:17:25,817 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T14:17:25,817 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T14:17:25,820 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-07T14:17:25,820 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:17:25,821 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T14:17:25,821 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T14:17:25,824 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-07T14:17:25,824 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:17:25,825 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T14:17:25,828 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-07T14:17:25,829 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-07T14:17:25,838 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-07T14:17:25,842 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T14:17:25,847 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T14:17:25,849 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63854293, jitterRate=-0.04849688708782196}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-07T14:17:25,853 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-07T14:17:25,854 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-07T14:17:25,883 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75b75a4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:17:25,918 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
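For reference, the FlushLargeStoresPolicy fallback reported in this span works out directly from numbers printed nearby: the master local region flushes at flushSize=134217728 bytes and carries four column families (info, proc, rs, state), so 134217728 / 4 = 33554432 bytes, which is the "32.0 M" per-family lower bound and the flushSizeLowerBound=33554432 shown when the region opens.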
2024-11-07T14:17:25,930 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-07T14:17:25,930 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-07T14:17:25,932 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-07T14:17:25,934 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-11-07T14:17:25,939 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 4 msec 2024-11-07T14:17:25,939 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-07T14:17:25,965 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-07T14:17:25,977 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-07T14:17:25,980 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-11-07T14:17:25,982 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-07T14:17:25,983 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-07T14:17:25,985 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-11-07T14:17:25,987 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-07T14:17:25,990 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-07T14:17:25,991 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-11-07T14:17:25,992 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-07T14:17:25,994 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-11-07T14:17:26,004 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-07T14:17:26,005 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-07T14:17:26,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-07T14:17:26,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45917-0x1018318208e0001, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-07T14:17:26,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T14:17:26,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45917-0x1018318208e0001, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T14:17:26,010 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=69430dbfd73f,40909,1730989043291, sessionid=0x1018318208e0000, setting cluster-up flag (Was=false) 2024-11-07T14:17:26,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T14:17:26,024 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45917-0x1018318208e0001, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T14:17:26,029 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-07T14:17:26,031 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=69430dbfd73f,40909,1730989043291 2024-11-07T14:17:26,036 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T14:17:26,036 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45917-0x1018318208e0001, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T14:17:26,041 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-07T14:17:26,043 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=69430dbfd73f,40909,1730989043291 2024-11-07T14:17:26,134 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure 
table=hbase:meta 2024-11-07T14:17:26,137 DEBUG [RS:0;69430dbfd73f:45917 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;69430dbfd73f:45917 2024-11-07T14:17:26,138 INFO [RS:0;69430dbfd73f:45917 {}] regionserver.HRegionServer(1008): ClusterId : 74151dd5-bc79-44a6-a920-eee3c8d4b757 2024-11-07T14:17:26,140 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-07T14:17:26,141 DEBUG [RS:0;69430dbfd73f:45917 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-07T14:17:26,143 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-07T14:17:26,146 DEBUG [RS:0;69430dbfd73f:45917 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-07T14:17:26,146 DEBUG [RS:0;69430dbfd73f:45917 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-07T14:17:26,149 DEBUG [RS:0;69430dbfd73f:45917 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-07T14:17:26,149 DEBUG [RS:0;69430dbfd73f:45917 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2242a3cf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:17:26,151 DEBUG [RS:0;69430dbfd73f:45917 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3638753f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=69430dbfd73f/172.17.0.2:0 2024-11-07T14:17:26,149 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 69430dbfd73f,40909,1730989043291 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-07T14:17:26,154 INFO [RS:0;69430dbfd73f:45917 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-11-07T14:17:26,154 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/69430dbfd73f:0, corePoolSize=5, maxPoolSize=5 2024-11-07T14:17:26,154 INFO [RS:0;69430dbfd73f:45917 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-11-07T14:17:26,154 DEBUG [RS:0;69430dbfd73f:45917 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-11-07T14:17:26,154 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/69430dbfd73f:0, corePoolSize=5, maxPoolSize=5 2024-11-07T14:17:26,154 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/69430dbfd73f:0, corePoolSize=5, maxPoolSize=5 2024-11-07T14:17:26,154 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/69430dbfd73f:0, corePoolSize=5, maxPoolSize=5 2024-11-07T14:17:26,155 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/69430dbfd73f:0, corePoolSize=10, maxPoolSize=10 2024-11-07T14:17:26,155 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/69430dbfd73f:0, corePoolSize=1, maxPoolSize=1 2024-11-07T14:17:26,155 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/69430dbfd73f:0, corePoolSize=2, maxPoolSize=2 2024-11-07T14:17:26,156 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/69430dbfd73f:0, corePoolSize=1, maxPoolSize=1 2024-11-07T14:17:26,156 INFO [RS:0;69430dbfd73f:45917 {}] regionserver.HRegionServer(3073): reportForDuty to master=69430dbfd73f,40909,1730989043291 with isa=69430dbfd73f/172.17.0.2:45917, startcode=1730989044081 2024-11-07T14:17:26,158 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1730989076158 2024-11-07T14:17:26,159 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-07T14:17:26,161 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-07T14:17:26,162 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-11-07T14:17:26,162 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-11-07T14:17:26,165 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-07T14:17:26,165 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-07T14:17:26,166 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-07T14:17:26,166 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-07T14:17:26,166 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:17:26,166 INFO 
[master/69430dbfd73f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-07T14:17:26,167 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-07T14:17:26,168 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-07T14:17:26,170 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-07T14:17:26,170 DEBUG [RS:0;69430dbfd73f:45917 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-07T14:17:26,170 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-07T14:17:26,173 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-07T14:17:26,174 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-07T14:17:26,178 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/69430dbfd73f:0:becomeActiveMaster-HFileCleaner.large.0-1730989046175,5,FailOnTimeoutGroup] 2024-11-07T14:17:26,178 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/69430dbfd73f:0:becomeActiveMaster-HFileCleaner.small.0-1730989046178,5,FailOnTimeoutGroup] 2024-11-07T14:17:26,178 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-07T14:17:26,179 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 
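The util.FSTableDescriptors(133) entry above prints the full hbase:meta table descriptor. As a reading aid, the sketch below builds a column family with the same attributes the log shows for the 'info' family (VERSIONS => '3', BLOOMFILTER => 'ROWCOL', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', IN_MEMORY => 'true', BLOCKSIZE => 8 KB) using the public HBase 2.x client API. The class and the "sketch" table name are hypothetical; this is not how the test itself creates the descriptor.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative only: mirrors the attributes the log prints for the 'info'
// family of hbase:meta; not the code path this test actually runs.
public class MetaInfoFamilySketch {
  public static void main(String[] args) {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                   // VERSIONS => '3'
        .setBloomFilterType(BloomType.ROWCOL)                // BLOOMFILTER => 'ROWCOL'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)// DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
        .setInMemory(true)                                   // IN_MEMORY => 'true'
        .setBlocksize(8 * 1024)                              // BLOCKSIZE => '8192 B (8KB)'
        .build();
    TableDescriptor sketch = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("sketch"))             // hypothetical table name, not hbase:meta itself
        .setColumnFamily(info)
        .build();
    System.out.println(sketch);
  }
}
```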
2024-11-07T14:17:26,180 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-07T14:17:26,180 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-07T14:17:26,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741831_1007 (size=1039) 2024-11-07T14:17:26,208 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35447, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-07T14:17:26,214 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40909 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:26,216 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40909 {}] master.ServerManager(486): Registering regionserver=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:26,231 DEBUG [RS:0;69430dbfd73f:45917 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8 2024-11-07T14:17:26,231 DEBUG [RS:0;69430dbfd73f:45917 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:34807 2024-11-07T14:17:26,231 DEBUG [RS:0;69430dbfd73f:45917 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-11-07T14:17:26,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T14:17:26,236 DEBUG [RS:0;69430dbfd73f:45917 {}] zookeeper.ZKUtil(111): regionserver:45917-0x1018318208e0001, quorum=127.0.0.1:51818, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/69430dbfd73f,45917,1730989044081 2024-11-07T14:17:26,236 WARN [RS:0;69430dbfd73f:45917 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-07T14:17:26,236 INFO [RS:0;69430dbfd73f:45917 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-07T14:17:26,237 DEBUG [RS:0;69430dbfd73f:45917 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/WALs/69430dbfd73f,45917,1730989044081 2024-11-07T14:17:26,239 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [69430dbfd73f,45917,1730989044081] 2024-11-07T14:17:26,250 DEBUG [RS:0;69430dbfd73f:45917 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-11-07T14:17:26,262 INFO [RS:0;69430dbfd73f:45917 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-07T14:17:26,278 INFO [RS:0;69430dbfd73f:45917 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-07T14:17:26,281 INFO [RS:0;69430dbfd73f:45917 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-07T14:17:26,281 INFO [RS:0;69430dbfd73f:45917 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T14:17:26,282 INFO [RS:0;69430dbfd73f:45917 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-11-07T14:17:26,289 INFO [RS:0;69430dbfd73f:45917 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-07T14:17:26,289 DEBUG [RS:0;69430dbfd73f:45917 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/69430dbfd73f:0, corePoolSize=1, maxPoolSize=1 2024-11-07T14:17:26,289 DEBUG [RS:0;69430dbfd73f:45917 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/69430dbfd73f:0, corePoolSize=1, maxPoolSize=1 2024-11-07T14:17:26,289 DEBUG [RS:0;69430dbfd73f:45917 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/69430dbfd73f:0, corePoolSize=1, maxPoolSize=1 2024-11-07T14:17:26,289 DEBUG [RS:0;69430dbfd73f:45917 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/69430dbfd73f:0, corePoolSize=1, maxPoolSize=1 2024-11-07T14:17:26,290 DEBUG [RS:0;69430dbfd73f:45917 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/69430dbfd73f:0, corePoolSize=1, maxPoolSize=1 2024-11-07T14:17:26,290 DEBUG [RS:0;69430dbfd73f:45917 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/69430dbfd73f:0, corePoolSize=2, maxPoolSize=2 2024-11-07T14:17:26,290 DEBUG [RS:0;69430dbfd73f:45917 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/69430dbfd73f:0, corePoolSize=1, maxPoolSize=1 2024-11-07T14:17:26,290 DEBUG [RS:0;69430dbfd73f:45917 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/69430dbfd73f:0, corePoolSize=1, maxPoolSize=1 2024-11-07T14:17:26,290 DEBUG [RS:0;69430dbfd73f:45917 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/69430dbfd73f:0, corePoolSize=1, maxPoolSize=1 2024-11-07T14:17:26,290 DEBUG [RS:0;69430dbfd73f:45917 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/69430dbfd73f:0, corePoolSize=1, maxPoolSize=1 2024-11-07T14:17:26,290 DEBUG [RS:0;69430dbfd73f:45917 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/69430dbfd73f:0, corePoolSize=1, maxPoolSize=1 2024-11-07T14:17:26,291 DEBUG [RS:0;69430dbfd73f:45917 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/69430dbfd73f:0, corePoolSize=3, maxPoolSize=3 2024-11-07T14:17:26,291 DEBUG [RS:0;69430dbfd73f:45917 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0, corePoolSize=3, maxPoolSize=3 2024-11-07T14:17:26,291 INFO [RS:0;69430dbfd73f:45917 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T14:17:26,292 INFO [RS:0;69430dbfd73f:45917 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T14:17:26,292 INFO [RS:0;69430dbfd73f:45917 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-07T14:17:26,292 INFO [RS:0;69430dbfd73f:45917 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-07T14:17:26,292 INFO [RS:0;69430dbfd73f:45917 {}] hbase.ChoreService(168): Chore ScheduledChore name=69430dbfd73f,45917,1730989044081-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-11-07T14:17:26,315 INFO [RS:0;69430dbfd73f:45917 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-07T14:17:26,317 INFO [RS:0;69430dbfd73f:45917 {}] hbase.ChoreService(168): Chore ScheduledChore name=69430dbfd73f,45917,1730989044081-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T14:17:26,346 INFO [RS:0;69430dbfd73f:45917 {}] regionserver.Replication(204): 69430dbfd73f,45917,1730989044081 started 2024-11-07T14:17:26,347 INFO [RS:0;69430dbfd73f:45917 {}] regionserver.HRegionServer(1767): Serving as 69430dbfd73f,45917,1730989044081, RpcServer on 69430dbfd73f/172.17.0.2:45917, sessionid=0x1018318208e0001 2024-11-07T14:17:26,347 DEBUG [RS:0;69430dbfd73f:45917 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-07T14:17:26,348 DEBUG [RS:0;69430dbfd73f:45917 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:26,348 DEBUG [RS:0;69430dbfd73f:45917 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '69430dbfd73f,45917,1730989044081' 2024-11-07T14:17:26,348 DEBUG [RS:0;69430dbfd73f:45917 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-07T14:17:26,349 DEBUG [RS:0;69430dbfd73f:45917 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-07T14:17:26,349 DEBUG [RS:0;69430dbfd73f:45917 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-07T14:17:26,350 DEBUG [RS:0;69430dbfd73f:45917 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-07T14:17:26,350 DEBUG [RS:0;69430dbfd73f:45917 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:26,350 DEBUG [RS:0;69430dbfd73f:45917 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '69430dbfd73f,45917,1730989044081' 2024-11-07T14:17:26,350 DEBUG [RS:0;69430dbfd73f:45917 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-07T14:17:26,351 DEBUG [RS:0;69430dbfd73f:45917 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-07T14:17:26,351 DEBUG [RS:0;69430dbfd73f:45917 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-07T14:17:26,351 INFO [RS:0;69430dbfd73f:45917 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-07T14:17:26,351 INFO [RS:0;69430dbfd73f:45917 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-07T14:17:26,457 INFO [RS:0;69430dbfd73f:45917 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-07T14:17:26,460 INFO [RS:0;69430dbfd73f:45917 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=69430dbfd73f%2C45917%2C1730989044081, suffix=, logDir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/WALs/69430dbfd73f,45917,1730989044081, archiveDir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/oldWALs, maxLogs=32 2024-11-07T14:17:26,478 DEBUG [RS:0;69430dbfd73f:45917 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(600): When create output stream for /user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/WALs/69430dbfd73f,45917,1730989044081/69430dbfd73f%2C45917%2C1730989044081.1730989046463, exclude list is [], retry=0 2024-11-07T14:17:26,484 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34705,DS-80eb1fae-424d-47c9-a968-ddde82085679,DISK] 2024-11-07T14:17:26,487 INFO [RS:0;69430dbfd73f:45917 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/WALs/69430dbfd73f,45917,1730989044081/69430dbfd73f%2C45917%2C1730989044081.1730989046463 2024-11-07T14:17:26,488 DEBUG [RS:0;69430dbfd73f:45917 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44351:44351)] 2024-11-07T14:17:26,583 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-11-07T14:17:26,584 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8 2024-11-07T14:17:26,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741833_1009 (size=32) 2024-11-07T14:17:26,995 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): 
Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T14:17:26,998 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T14:17:27,001 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T14:17:27,001 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:17:27,002 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T14:17:27,002 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T14:17:27,005 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T14:17:27,005 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:17:27,006 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T14:17:27,006 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T14:17:27,008 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T14:17:27,008 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:17:27,009 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T14:17:27,011 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/hbase/meta/1588230740 2024-11-07T14:17:27,012 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/hbase/meta/1588230740 2024-11-07T14:17:27,015 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
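The compactions.CompactionConfiguration(181) entries repeated throughout this section all report the same store defaults (minCompactSize 128 MB, 3 to 10 files per minor compaction, ratio 1.2, off-peak ratio 5.0). The sketch below lists the configuration keys that normally back those numbers; the key names are standard HBase settings, and the values simply echo what the log prints rather than anything this test is known to set explicitly.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: keys usually behind the CompactionConfiguration line,
// with values copied from the log output above.
public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize: files below this are always eligible
    conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // exploring/ratio-based selection ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // looser ratio during off-peak hours
    System.out.println(conf.getFloat("hbase.hstore.compaction.ratio", -1f));
  }
}
```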
2024-11-07T14:17:27,018 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-07T14:17:27,022 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T14:17:27,024 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65699552, jitterRate=-0.021000385284423828}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T14:17:27,026 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-07T14:17:27,026 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-07T14:17:27,027 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-07T14:17:27,027 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-07T14:17:27,027 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-07T14:17:27,027 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-07T14:17:27,028 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-07T14:17:27,028 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-07T14:17:27,031 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-11-07T14:17:27,031 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-11-07T14:17:27,037 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-07T14:17:27,045 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-07T14:17:27,047 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-07T14:17:27,199 DEBUG [69430dbfd73f:40909 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-07T14:17:27,204 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:27,210 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 69430dbfd73f,45917,1730989044081, state=OPENING 2024-11-07T14:17:27,215 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-07T14:17:27,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45917-0x1018318208e0001, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T14:17:27,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T14:17:27,218 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T14:17:27,218 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T14:17:27,220 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=69430dbfd73f,45917,1730989044081}] 2024-11-07T14:17:27,394 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:27,396 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-07T14:17:27,399 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37848, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-07T14:17:27,409 INFO [RS_OPEN_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-11-07T14:17:27,409 INFO [RS_OPEN_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-07T14:17:27,410 INFO [RS_OPEN_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-07T14:17:27,413 INFO [RS_OPEN_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=69430dbfd73f%2C45917%2C1730989044081.meta, suffix=.meta, logDir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/WALs/69430dbfd73f,45917,1730989044081, archiveDir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/oldWALs, maxLogs=32 2024-11-07T14:17:27,431 DEBUG [RS_OPEN_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(600): When create output stream for /user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/WALs/69430dbfd73f,45917,1730989044081/69430dbfd73f%2C45917%2C1730989044081.meta.1730989047415.meta, exclude list is [], retry=0 2024-11-07T14:17:27,435 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34705,DS-80eb1fae-424d-47c9-a968-ddde82085679,DISK] 2024-11-07T14:17:27,439 INFO [RS_OPEN_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/WALs/69430dbfd73f,45917,1730989044081/69430dbfd73f%2C45917%2C1730989044081.meta.1730989047415.meta 2024-11-07T14:17:27,440 DEBUG [RS_OPEN_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with 
pipeline: [(127.0.0.1/127.0.0.1:44351:44351)] 2024-11-07T14:17:27,440 DEBUG [RS_OPEN_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-07T14:17:27,442 DEBUG [RS_OPEN_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-07T14:17:27,504 DEBUG [RS_OPEN_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-07T14:17:27,509 INFO [RS_OPEN_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-07T14:17:27,513 DEBUG [RS_OPEN_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-07T14:17:27,514 DEBUG [RS_OPEN_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T14:17:27,514 DEBUG [RS_OPEN_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-11-07T14:17:27,514 DEBUG [RS_OPEN_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-11-07T14:17:27,517 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T14:17:27,519 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T14:17:27,519 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:17:27,520 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T14:17:27,521 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T14:17:27,522 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T14:17:27,522 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:17:27,523 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T14:17:27,524 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T14:17:27,525 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T14:17:27,526 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:17:27,526 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T14:17:27,528 DEBUG [RS_OPEN_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/hbase/meta/1588230740 2024-11-07T14:17:27,531 DEBUG [RS_OPEN_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/hbase/meta/1588230740 2024-11-07T14:17:27,533 DEBUG [RS_OPEN_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T14:17:27,536 DEBUG [RS_OPEN_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-07T14:17:27,537 INFO [RS_OPEN_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63494013, jitterRate=-0.053865477442741394}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T14:17:27,539 DEBUG [RS_OPEN_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-07T14:17:27,547 INFO [RS_OPEN_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1730989047388 2024-11-07T14:17:27,558 DEBUG [RS_OPEN_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-07T14:17:27,559 INFO [RS_OPEN_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-11-07T14:17:27,560 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:27,562 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 69430dbfd73f,45917,1730989044081, state=OPEN 2024-11-07T14:17:27,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45917-0x1018318208e0001, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T14:17:27,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T14:17:27,566 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T14:17:27,566 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T14:17:27,570 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-11-07T14:17:27,571 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=69430dbfd73f,45917,1730989044081 in 346 msec 2024-11-07T14:17:27,576 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-11-07T14:17:27,576 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; 
TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 535 msec 2024-11-07T14:17:27,581 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.4980 sec 2024-11-07T14:17:27,582 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1730989047582, completionTime=-1 2024-11-07T14:17:27,582 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-07T14:17:27,582 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-11-07T14:17:27,621 DEBUG [hconnection-0x10f14607-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:17:27,624 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37852, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:17:27,635 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-11-07T14:17:27,635 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1730989107635 2024-11-07T14:17:27,635 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1730989167635 2024-11-07T14:17:27,635 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 53 msec 2024-11-07T14:17:27,658 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=69430dbfd73f,40909,1730989043291-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T14:17:27,659 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=69430dbfd73f,40909,1730989043291-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T14:17:27,659 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=69430dbfd73f,40909,1730989043291-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T14:17:27,660 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-69430dbfd73f:40909, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T14:17:27,661 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-07T14:17:27,667 DEBUG [master/69430dbfd73f:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-11-07T14:17:27,669 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-11-07T14:17:27,670 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-07T14:17:27,677 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-11-07T14:17:27,680 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-11-07T14:17:27,681 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:17:27,683 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-07T14:17:27,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741835_1011 (size=358) 2024-11-07T14:17:28,120 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 88991fc0836214a8e1586689b73580c1, NAME => 'hbase:namespace,,1730989047669.88991fc0836214a8e1586689b73580c1.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8 2024-11-07T14:17:28,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741836_1012 (size=42) 2024-11-07T14:17:28,531 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1730989047669.88991fc0836214a8e1586689b73580c1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T14:17:28,531 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 88991fc0836214a8e1586689b73580c1, disabling compactions & flushes 2024-11-07T14:17:28,531 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1730989047669.88991fc0836214a8e1586689b73580c1. 2024-11-07T14:17:28,531 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1730989047669.88991fc0836214a8e1586689b73580c1. 2024-11-07T14:17:28,531 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1730989047669.88991fc0836214a8e1586689b73580c1. 
after waiting 0 ms 2024-11-07T14:17:28,531 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1730989047669.88991fc0836214a8e1586689b73580c1. 2024-11-07T14:17:28,531 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1730989047669.88991fc0836214a8e1586689b73580c1. 2024-11-07T14:17:28,531 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 88991fc0836214a8e1586689b73580c1: 2024-11-07T14:17:28,534 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-11-07T14:17:28,541 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1730989047669.88991fc0836214a8e1586689b73580c1.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1730989048535"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730989048535"}]},"ts":"1730989048535"} 2024-11-07T14:17:28,573 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-07T14:17:28,575 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-07T14:17:28,578 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730989048575"}]},"ts":"1730989048575"} 2024-11-07T14:17:28,583 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-11-07T14:17:28,589 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=88991fc0836214a8e1586689b73580c1, ASSIGN}] 2024-11-07T14:17:28,591 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=88991fc0836214a8e1586689b73580c1, ASSIGN 2024-11-07T14:17:28,593 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=88991fc0836214a8e1586689b73580c1, ASSIGN; state=OFFLINE, location=69430dbfd73f,45917,1730989044081; forceNewPlan=false, retain=false 2024-11-07T14:17:28,744 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=88991fc0836214a8e1586689b73580c1, regionState=OPENING, regionLocation=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:28,748 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 88991fc0836214a8e1586689b73580c1, server=69430dbfd73f,45917,1730989044081}] 2024-11-07T14:17:28,902 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:28,908 INFO [RS_OPEN_PRIORITY_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1730989047669.88991fc0836214a8e1586689b73580c1. 2024-11-07T14:17:28,908 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 88991fc0836214a8e1586689b73580c1, NAME => 'hbase:namespace,,1730989047669.88991fc0836214a8e1586689b73580c1.', STARTKEY => '', ENDKEY => ''} 2024-11-07T14:17:28,909 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 88991fc0836214a8e1586689b73580c1 2024-11-07T14:17:28,909 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1730989047669.88991fc0836214a8e1586689b73580c1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T14:17:28,909 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 88991fc0836214a8e1586689b73580c1 2024-11-07T14:17:28,909 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 88991fc0836214a8e1586689b73580c1 2024-11-07T14:17:28,911 INFO [StoreOpener-88991fc0836214a8e1586689b73580c1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 88991fc0836214a8e1586689b73580c1 2024-11-07T14:17:28,913 INFO [StoreOpener-88991fc0836214a8e1586689b73580c1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 88991fc0836214a8e1586689b73580c1 columnFamilyName info 2024-11-07T14:17:28,914 DEBUG [StoreOpener-88991fc0836214a8e1586689b73580c1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:17:28,914 INFO [StoreOpener-88991fc0836214a8e1586689b73580c1-1 {}] regionserver.HStore(327): Store=88991fc0836214a8e1586689b73580c1/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T14:17:28,916 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/hbase/namespace/88991fc0836214a8e1586689b73580c1 2024-11-07T14:17:28,916 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/hbase/namespace/88991fc0836214a8e1586689b73580c1 2024-11-07T14:17:28,920 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 88991fc0836214a8e1586689b73580c1 2024-11-07T14:17:28,923 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/hbase/namespace/88991fc0836214a8e1586689b73580c1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T14:17:28,924 INFO [RS_OPEN_PRIORITY_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 88991fc0836214a8e1586689b73580c1; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71281744, jitterRate=0.06218075752258301}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-07T14:17:28,924 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 88991fc0836214a8e1586689b73580c1: 2024-11-07T14:17:28,926 INFO [RS_OPEN_PRIORITY_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1730989047669.88991fc0836214a8e1586689b73580c1., pid=6, masterSystemTime=1730989048902 2024-11-07T14:17:28,929 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1730989047669.88991fc0836214a8e1586689b73580c1. 2024-11-07T14:17:28,929 INFO [RS_OPEN_PRIORITY_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1730989047669.88991fc0836214a8e1586689b73580c1. 
2024-11-07T14:17:28,930 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=88991fc0836214a8e1586689b73580c1, regionState=OPEN, openSeqNum=2, regionLocation=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:28,937 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-11-07T14:17:28,938 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 88991fc0836214a8e1586689b73580c1, server=69430dbfd73f,45917,1730989044081 in 186 msec 2024-11-07T14:17:28,940 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-11-07T14:17:28,940 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=88991fc0836214a8e1586689b73580c1, ASSIGN in 349 msec 2024-11-07T14:17:28,942 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-07T14:17:28,942 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730989048942"}]},"ts":"1730989048942"} 2024-11-07T14:17:28,944 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-11-07T14:17:28,948 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-11-07T14:17:28,951 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.2770 sec 2024-11-07T14:17:28,981 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-11-07T14:17:28,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-11-07T14:17:28,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45917-0x1018318208e0001, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T14:17:28,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T14:17:29,012 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-11-07T14:17:29,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-07T14:17:29,033 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 23 msec 2024-11-07T14:17:29,036 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-11-07T14:17:29,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-07T14:17:29,052 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 15 msec 2024-11-07T14:17:29,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-11-07T14:17:29,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-11-07T14:17:29,065 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 4.906sec 2024-11-07T14:17:29,066 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-07T14:17:29,068 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-07T14:17:29,069 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-07T14:17:29,069 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-07T14:17:29,069 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-07T14:17:29,070 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=69430dbfd73f,40909,1730989043291-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T14:17:29,071 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=69430dbfd73f,40909,1730989043291-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-07T14:17:29,077 DEBUG [master/69430dbfd73f:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-11-07T14:17:29,078 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-07T14:17:29,078 INFO [master/69430dbfd73f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=69430dbfd73f,40909,1730989043291-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-07T14:17:29,141 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x76523d14 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@46873e4f 2024-11-07T14:17:29,141 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-11-07T14:17:29,148 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76ba07, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:17:29,152 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-07T14:17:29,152 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-07T14:17:29,161 DEBUG [hconnection-0x7edf53b1-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:17:29,170 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37860, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:17:29,178 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=69430dbfd73f,40909,1730989043291 2024-11-07T14:17:29,195 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=325, ProcessCount=11, AvailableMemoryMB=5966 2024-11-07T14:17:29,206 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-07T14:17:29,209 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58406, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-07T14:17:29,216 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-07T14:17:29,219 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T14:17:29,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-07T14:17:29,225 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-07T14:17:29,225 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-11-07T14:17:29,225 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:17:29,227 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-07T14:17:29,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-07T14:17:29,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741837_1013 (size=960) 2024-11-07T14:17:29,241 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8 2024-11-07T14:17:29,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741838_1014 (size=53) 2024-11-07T14:17:29,251 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T14:17:29,252 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 5a16e97064ea2ba83f416db90324fc7e, disabling compactions & flushes 2024-11-07T14:17:29,252 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:29,252 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:29,252 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. after waiting 0 ms 2024-11-07T14:17:29,252 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:29,252 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:29,252 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:29,254 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-07T14:17:29,255 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1730989049254"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730989049254"}]},"ts":"1730989049254"} 2024-11-07T14:17:29,258 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-11-07T14:17:29,260 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-07T14:17:29,260 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730989049260"}]},"ts":"1730989049260"} 2024-11-07T14:17:29,263 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-07T14:17:29,268 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5a16e97064ea2ba83f416db90324fc7e, ASSIGN}] 2024-11-07T14:17:29,270 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5a16e97064ea2ba83f416db90324fc7e, ASSIGN 2024-11-07T14:17:29,271 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=5a16e97064ea2ba83f416db90324fc7e, ASSIGN; state=OFFLINE, location=69430dbfd73f,45917,1730989044081; forceNewPlan=false, retain=false 2024-11-07T14:17:29,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-07T14:17:29,422 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=5a16e97064ea2ba83f416db90324fc7e, regionState=OPENING, regionLocation=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:29,425 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081}] 2024-11-07T14:17:29,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-07T14:17:29,579 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:29,588 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
2024-11-07T14:17:29,588 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} 2024-11-07T14:17:29,589 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:29,589 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T14:17:29,589 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:29,589 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:29,592 INFO [StoreOpener-5a16e97064ea2ba83f416db90324fc7e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:29,596 INFO [StoreOpener-5a16e97064ea2ba83f416db90324fc7e-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T14:17:29,597 INFO [StoreOpener-5a16e97064ea2ba83f416db90324fc7e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5a16e97064ea2ba83f416db90324fc7e columnFamilyName A 2024-11-07T14:17:29,597 DEBUG [StoreOpener-5a16e97064ea2ba83f416db90324fc7e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:17:29,598 INFO [StoreOpener-5a16e97064ea2ba83f416db90324fc7e-1 {}] regionserver.HStore(327): Store=5a16e97064ea2ba83f416db90324fc7e/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T14:17:29,598 INFO [StoreOpener-5a16e97064ea2ba83f416db90324fc7e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:29,601 INFO [StoreOpener-5a16e97064ea2ba83f416db90324fc7e-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T14:17:29,601 INFO [StoreOpener-5a16e97064ea2ba83f416db90324fc7e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5a16e97064ea2ba83f416db90324fc7e columnFamilyName B 2024-11-07T14:17:29,602 DEBUG [StoreOpener-5a16e97064ea2ba83f416db90324fc7e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:17:29,602 INFO [StoreOpener-5a16e97064ea2ba83f416db90324fc7e-1 {}] regionserver.HStore(327): Store=5a16e97064ea2ba83f416db90324fc7e/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T14:17:29,603 INFO [StoreOpener-5a16e97064ea2ba83f416db90324fc7e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:29,605 INFO [StoreOpener-5a16e97064ea2ba83f416db90324fc7e-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T14:17:29,605 INFO [StoreOpener-5a16e97064ea2ba83f416db90324fc7e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5a16e97064ea2ba83f416db90324fc7e columnFamilyName C 2024-11-07T14:17:29,605 DEBUG [StoreOpener-5a16e97064ea2ba83f416db90324fc7e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:17:29,606 INFO [StoreOpener-5a16e97064ea2ba83f416db90324fc7e-1 {}] regionserver.HStore(327): Store=5a16e97064ea2ba83f416db90324fc7e/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T14:17:29,606 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:29,607 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:29,608 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:29,611 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T14:17:29,613 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:29,616 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T14:17:29,617 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 5a16e97064ea2ba83f416db90324fc7e; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70958997, jitterRate=0.05737145245075226}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T14:17:29,618 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:29,619 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e., pid=11, masterSystemTime=1730989049579 2024-11-07T14:17:29,622 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:29,622 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
2024-11-07T14:17:29,623 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=5a16e97064ea2ba83f416db90324fc7e, regionState=OPEN, openSeqNum=2, regionLocation=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:29,630 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-11-07T14:17:29,631 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 in 201 msec 2024-11-07T14:17:29,633 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-11-07T14:17:29,633 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=5a16e97064ea2ba83f416db90324fc7e, ASSIGN in 362 msec 2024-11-07T14:17:29,634 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-07T14:17:29,634 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730989049634"}]},"ts":"1730989049634"} 2024-11-07T14:17:29,637 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-07T14:17:29,641 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-07T14:17:29,644 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 421 msec 2024-11-07T14:17:29,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-07T14:17:29,844 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-11-07T14:17:29,850 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6fcb5f29 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7fdf5682 2024-11-07T14:17:29,853 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f6e36fe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:17:29,855 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:17:29,858 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45676, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:17:29,861 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-07T14:17:29,863 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36718, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-07T14:17:29,872 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f2091cc to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79d38d10 2024-11-07T14:17:29,877 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f343a4d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:17:29,879 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x09bd0964 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6c63ae4e 2024-11-07T14:17:29,885 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1324ee83, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:17:29,887 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x18cb251d to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@736f1673 2024-11-07T14:17:29,893 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@478bae6b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:17:29,895 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x45b55c24 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4ee2166f 2024-11-07T14:17:29,901 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48068a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:17:29,903 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0e52b42a to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3f34ff67 2024-11-07T14:17:29,907 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38766d64, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:17:29,910 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x09ed28bb to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4b5cad1a 2024-11-07T14:17:29,917 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@295cb1ac, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:17:29,918 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x12a1285d to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c3b736e 2024-11-07T14:17:29,923 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70267494, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:17:29,925 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x353bc462 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@767a8485 2024-11-07T14:17:29,929 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d2a8e08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:17:29,930 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x47fe2fa7 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6502d571 2024-11-07T14:17:29,934 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c915d17, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:17:29,941 DEBUG [hconnection-0x6acce1ef-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:17:29,942 DEBUG [hconnection-0x75aabd1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:17:29,944 DEBUG [hconnection-0x6bdb6869-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:17:29,944 DEBUG [hconnection-0x7d7478c5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:17:29,947 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45692, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:17:29,947 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45686, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:17:29,948 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45694, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:17:29,959 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45710, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:17:29,963 DEBUG [hconnection-0x12134611-metaLookup-shared--pool-0 {}] 
ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:17:29,965 DEBUG [hconnection-0x4872f2f9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:17:29,967 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:17:29,974 DEBUG [hconnection-0x7c0bf2ac-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:17:29,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-11-07T14:17:29,975 DEBUG [hconnection-0x3d0fdd06-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:17:29,977 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:17:29,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-07T14:17:29,979 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:17:29,985 DEBUG [hconnection-0x42186a3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:17:29,987 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45720, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:17:29,988 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45752, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:17:29,989 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:17:29,992 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45758, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:17:29,996 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45738, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:17:30,029 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45732, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:17:30,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:30,037 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a16e97064ea2ba83f416db90324fc7e 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-07T14:17:30,046 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=A 2024-11-07T14:17:30,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:30,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=B 2024-11-07T14:17:30,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:30,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=C 2024-11-07T14:17:30,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:30,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-07T14:17:30,157 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:30,159 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-07T14:17:30,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:30,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. as already flushing 2024-11-07T14:17:30,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:30,170 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:17:30,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:30,185 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/796e2af9d0334fc39af0344e03d7f9f6 is 50, key is test_row_0/A:col10/1730989050012/Put/seqid=0 2024-11-07T14:17:30,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:30,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741839_1015 (size=12001) 2024-11-07T14:17:30,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-07T14:17:30,297 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:30,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989110271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:30,299 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:30,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989110286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:30,303 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:30,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989110289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:30,305 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:30,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989110293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:30,306 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:30,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989110297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:30,349 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:30,350 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-07T14:17:30,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:30,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. as already flushing 2024-11-07T14:17:30,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:30,359 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:17:30,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:30,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:30,460 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:30,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989110455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:30,462 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:30,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989110457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:30,464 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:30,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989110458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:30,469 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:30,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989110459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:30,470 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:30,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989110460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:30,513 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:30,514 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-07T14:17:30,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:30,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. as already flushing 2024-11-07T14:17:30,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:30,528 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:17:30,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:30,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:30,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-07T14:17:30,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:30,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989110666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:30,669 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:30,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989110668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:30,671 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:30,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989110669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:30,680 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:30,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989110675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:30,687 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/796e2af9d0334fc39af0344e03d7f9f6 2024-11-07T14:17:30,676 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:30,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989110675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:30,689 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:30,694 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-07T14:17:30,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:30,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. as already flushing 2024-11-07T14:17:30,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:30,694 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:17:30,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:30,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:30,849 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:30,850 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-07T14:17:30,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:30,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. as already flushing 2024-11-07T14:17:30,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:30,851 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:30,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:30,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:30,869 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/2e20cb1039314b609e9f7c65b5019328 is 50, key is test_row_0/B:col10/1730989050012/Put/seqid=0 2024-11-07T14:17:30,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741840_1016 (size=12001) 2024-11-07T14:17:30,986 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:30,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989110975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:30,987 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:30,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989110977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:30,988 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:30,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989110977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:30,998 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:30,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989110994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:31,001 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:31,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989110996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:31,005 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:31,006 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-07T14:17:31,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:31,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. as already flushing 2024-11-07T14:17:31,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:31,007 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:17:31,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:31,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:31,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-07T14:17:31,162 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:31,163 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-07T14:17:31,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:31,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. as already flushing 2024-11-07T14:17:31,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:31,164 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:31,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:31,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:17:31,321 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:31,321 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-07T14:17:31,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:31,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. as already flushing 2024-11-07T14:17:31,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:31,322 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:31,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:17:31,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:17:31,347 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/2e20cb1039314b609e9f7c65b5019328 2024-11-07T14:17:31,420 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/a35997f314414218acc6247d02e434ca is 50, key is test_row_0/C:col10/1730989050012/Put/seqid=0 2024-11-07T14:17:31,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741841_1017 (size=12001) 2024-11-07T14:17:31,462 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/a35997f314414218acc6247d02e434ca 2024-11-07T14:17:31,476 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:31,477 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-07T14:17:31,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:31,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. as already flushing 2024-11-07T14:17:31,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:31,478 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/796e2af9d0334fc39af0344e03d7f9f6 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/796e2af9d0334fc39af0344e03d7f9f6 2024-11-07T14:17:31,478 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:31,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:31,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:31,495 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:31,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989111494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:31,498 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/796e2af9d0334fc39af0344e03d7f9f6, entries=150, sequenceid=13, filesize=11.7 K 2024-11-07T14:17:31,503 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:31,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989111496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:31,507 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:31,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989111507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:31,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/2e20cb1039314b609e9f7c65b5019328 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/2e20cb1039314b609e9f7c65b5019328 2024-11-07T14:17:31,510 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:31,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989111507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:31,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:31,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989111509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:31,526 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/2e20cb1039314b609e9f7c65b5019328, entries=150, sequenceid=13, filesize=11.7 K 2024-11-07T14:17:31,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/a35997f314414218acc6247d02e434ca as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/a35997f314414218acc6247d02e434ca 2024-11-07T14:17:31,543 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/a35997f314414218acc6247d02e434ca, entries=150, sequenceid=13, filesize=11.7 K 2024-11-07T14:17:31,545 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 5a16e97064ea2ba83f416db90324fc7e in 1508ms, sequenceid=13, compaction requested=false 2024-11-07T14:17:31,547 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-07T14:17:31,549 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:31,633 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:31,634 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-07T14:17:31,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
2024-11-07T14:17:31,634 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 5a16e97064ea2ba83f416db90324fc7e 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-07T14:17:31,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=A 2024-11-07T14:17:31,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:31,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=B 2024-11-07T14:17:31,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:31,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=C 2024-11-07T14:17:31,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:31,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/e7f0075ed0954288bf8d73fd35e29be8 is 50, key is test_row_0/A:col10/1730989050282/Put/seqid=0 2024-11-07T14:17:31,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741842_1018 (size=12001) 2024-11-07T14:17:32,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-07T14:17:32,113 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/e7f0075ed0954288bf8d73fd35e29be8 2024-11-07T14:17:32,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/7bb990414f01430881cc443124c0440c is 50, key is test_row_0/B:col10/1730989050282/Put/seqid=0 2024-11-07T14:17:32,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741843_1019 (size=12001) 2024-11-07T14:17:32,183 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), 
to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/7bb990414f01430881cc443124c0440c 2024-11-07T14:17:32,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/44f145425e764e4ab656989d4140db3b is 50, key is test_row_0/C:col10/1730989050282/Put/seqid=0 2024-11-07T14:17:32,254 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-07T14:17:32,256 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-11-07T14:17:32,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741844_1020 (size=12001) 2024-11-07T14:17:32,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:32,514 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. as already flushing 2024-11-07T14:17:32,546 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:32,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989112538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:32,552 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:32,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989112545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:32,553 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:32,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989112543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:32,554 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:32,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:32,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989112548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:32,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989112548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:32,652 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:32,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989112651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:32,660 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:32,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989112657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:32,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:32,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989112656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:32,662 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:32,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989112658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:32,663 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:32,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989112658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:32,677 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/44f145425e764e4ab656989d4140db3b 2024-11-07T14:17:32,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/e7f0075ed0954288bf8d73fd35e29be8 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/e7f0075ed0954288bf8d73fd35e29be8 2024-11-07T14:17:32,705 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/e7f0075ed0954288bf8d73fd35e29be8, entries=150, sequenceid=37, filesize=11.7 K 2024-11-07T14:17:32,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/7bb990414f01430881cc443124c0440c as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/7bb990414f01430881cc443124c0440c 2024-11-07T14:17:32,743 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/7bb990414f01430881cc443124c0440c, entries=150, sequenceid=37, filesize=11.7 K 2024-11-07T14:17:32,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/44f145425e764e4ab656989d4140db3b as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/44f145425e764e4ab656989d4140db3b 2024-11-07T14:17:32,765 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/44f145425e764e4ab656989d4140db3b, entries=150, sequenceid=37, filesize=11.7 K 2024-11-07T14:17:32,770 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 5a16e97064ea2ba83f416db90324fc7e in 1135ms, sequenceid=37, compaction requested=false 2024-11-07T14:17:32,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:32,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
2024-11-07T14:17:32,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-07T14:17:32,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-11-07T14:17:32,780 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-11-07T14:17:32,780 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7840 sec 2024-11-07T14:17:32,784 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 2.8120 sec 2024-11-07T14:17:32,865 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a16e97064ea2ba83f416db90324fc7e 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-07T14:17:32,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=A 2024-11-07T14:17:32,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:32,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=B 2024-11-07T14:17:32,866 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:32,866 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=C 2024-11-07T14:17:32,866 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:32,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:32,878 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/80c512a7665542e0acbc7d118b45ab01 is 50, key is test_row_0/A:col10/1730989052521/Put/seqid=0 2024-11-07T14:17:32,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741845_1021 (size=14341) 2024-11-07T14:17:32,944 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:32,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989112930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:32,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:32,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989112939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:32,950 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:32,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989112943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:32,953 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:32,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989112937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:32,954 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:32,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989112947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:33,027 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-07T14:17:33,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:33,060 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:33,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989113049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:33,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989113052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:33,061 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:33,061 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:33,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989113055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:33,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989113047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:33,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:33,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989113063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:33,265 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:33,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989113263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:33,266 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:33,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989113263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:33,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:33,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989113264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:33,268 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:33,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989113264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:33,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:33,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989113275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:33,329 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/80c512a7665542e0acbc7d118b45ab01 2024-11-07T14:17:33,354 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/f572a7ea75e64b3aa6c565ad28077dd5 is 50, key is test_row_0/B:col10/1730989052521/Put/seqid=0 2024-11-07T14:17:33,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741846_1022 (size=12001) 2024-11-07T14:17:33,570 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:33,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989113569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:33,576 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:33,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989113573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:33,578 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:33,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989113573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:33,578 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:33,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989113574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:33,585 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:33,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989113582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:33,778 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/f572a7ea75e64b3aa6c565ad28077dd5 2024-11-07T14:17:33,819 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/79bfd461eb9e4e49a339530fa63c01d0 is 50, key is test_row_0/C:col10/1730989052521/Put/seqid=0 2024-11-07T14:17:33,828 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-07T14:17:33,829 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-07T14:17:33,831 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-11-07T14:17:33,831 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-11-07T14:17:33,833 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-07T14:17:33,833 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-07T14:17:33,833 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-07T14:17:33,833 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-07T14:17:33,835 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-07T14:17:33,835 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-07T14:17:33,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741847_1023 (size=12001) 2024-11-07T14:17:34,074 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:34,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989114073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:34,082 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:34,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989114082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:34,084 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:34,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989114082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:34,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:34,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989114083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:34,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-07T14:17:34,093 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:34,093 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-11-07T14:17:34,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989114090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:34,095 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:17:34,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-11-07T14:17:34,099 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:17:34,101 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:17:34,101 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:17:34,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-07T14:17:34,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-07T14:17:34,254 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:34,255 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-07T14:17:34,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:34,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
as already flushing 2024-11-07T14:17:34,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:34,256 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:34,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:34,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:34,267 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/79bfd461eb9e4e49a339530fa63c01d0 2024-11-07T14:17:34,286 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/80c512a7665542e0acbc7d118b45ab01 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/80c512a7665542e0acbc7d118b45ab01 2024-11-07T14:17:34,299 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/80c512a7665542e0acbc7d118b45ab01, entries=200, sequenceid=51, filesize=14.0 K 2024-11-07T14:17:34,302 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/f572a7ea75e64b3aa6c565ad28077dd5 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/f572a7ea75e64b3aa6c565ad28077dd5 2024-11-07T14:17:34,318 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/f572a7ea75e64b3aa6c565ad28077dd5, entries=150, sequenceid=51, filesize=11.7 K 2024-11-07T14:17:34,320 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/79bfd461eb9e4e49a339530fa63c01d0 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/79bfd461eb9e4e49a339530fa63c01d0 2024-11-07T14:17:34,332 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/79bfd461eb9e4e49a339530fa63c01d0, entries=150, sequenceid=51, filesize=11.7 K 2024-11-07T14:17:34,334 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 5a16e97064ea2ba83f416db90324fc7e in 1470ms, sequenceid=51, compaction requested=true 2024-11-07T14:17:34,334 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:34,338 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a16e97064ea2ba83f416db90324fc7e:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:17:34,338 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:34,338 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:17:34,338 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:17:34,339 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a16e97064ea2ba83f416db90324fc7e:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:17:34,339 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:34,339 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a16e97064ea2ba83f416db90324fc7e:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:17:34,339 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:17:34,343 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:17:34,344 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 5a16e97064ea2ba83f416db90324fc7e/B is initiating minor compaction 
(all files) 2024-11-07T14:17:34,345 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a16e97064ea2ba83f416db90324fc7e/B in TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:34,345 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/2e20cb1039314b609e9f7c65b5019328, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/7bb990414f01430881cc443124c0440c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/f572a7ea75e64b3aa6c565ad28077dd5] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp, totalSize=35.2 K 2024-11-07T14:17:34,347 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e20cb1039314b609e9f7c65b5019328, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1730989049981 2024-11-07T14:17:34,348 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 7bb990414f01430881cc443124c0440c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1730989050282 2024-11-07T14:17:34,349 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting f572a7ea75e64b3aa6c565ad28077dd5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1730989052521 2024-11-07T14:17:34,352 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:17:34,352 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): 5a16e97064ea2ba83f416db90324fc7e/A is initiating minor compaction (all files) 2024-11-07T14:17:34,353 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a16e97064ea2ba83f416db90324fc7e/A in TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
2024-11-07T14:17:34,353 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/796e2af9d0334fc39af0344e03d7f9f6, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/e7f0075ed0954288bf8d73fd35e29be8, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/80c512a7665542e0acbc7d118b45ab01] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp, totalSize=37.4 K 2024-11-07T14:17:34,355 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 796e2af9d0334fc39af0344e03d7f9f6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1730989049981 2024-11-07T14:17:34,356 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting e7f0075ed0954288bf8d73fd35e29be8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1730989050282 2024-11-07T14:17:34,359 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 80c512a7665542e0acbc7d118b45ab01, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1730989052521 2024-11-07T14:17:34,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-07T14:17:34,409 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:34,410 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-07T14:17:34,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:34,411 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 5a16e97064ea2ba83f416db90324fc7e 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-07T14:17:34,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=A 2024-11-07T14:17:34,411 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a16e97064ea2ba83f416db90324fc7e#B#compaction#10 average throughput is 0.39 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:17:34,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:34,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=B 2024-11-07T14:17:34,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:34,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=C 2024-11-07T14:17:34,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:34,413 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/39eb218744e3469f9e35127787601ebc is 50, key is test_row_0/B:col10/1730989052521/Put/seqid=0 2024-11-07T14:17:34,421 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a16e97064ea2ba83f416db90324fc7e#A#compaction#9 average throughput is 0.24 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:17:34,422 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/db00db09b197479daee8b01310fa4086 is 50, key is test_row_0/A:col10/1730989052521/Put/seqid=0 2024-11-07T14:17:34,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/360f07f8cb1b4079a2641ba99ab6cd1d is 50, key is test_row_0/A:col10/1730989052936/Put/seqid=0 2024-11-07T14:17:34,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741848_1024 (size=12104) 2024-11-07T14:17:34,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741849_1025 (size=12104) 2024-11-07T14:17:34,483 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/39eb218744e3469f9e35127787601ebc as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/39eb218744e3469f9e35127787601ebc 2024-11-07T14:17:34,486 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741850_1026 (size=12001) 2024-11-07T14:17:34,492 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/360f07f8cb1b4079a2641ba99ab6cd1d 2024-11-07T14:17:34,506 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/db00db09b197479daee8b01310fa4086 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/db00db09b197479daee8b01310fa4086 2024-11-07T14:17:34,537 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a16e97064ea2ba83f416db90324fc7e/A of 5a16e97064ea2ba83f416db90324fc7e into db00db09b197479daee8b01310fa4086(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:17:34,537 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:34,538 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e., storeName=5a16e97064ea2ba83f416db90324fc7e/A, priority=13, startTime=1730989054336; duration=0sec 2024-11-07T14:17:34,539 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:17:34,539 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a16e97064ea2ba83f416db90324fc7e/B of 5a16e97064ea2ba83f416db90324fc7e into 39eb218744e3469f9e35127787601ebc(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:17:34,539 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a16e97064ea2ba83f416db90324fc7e:A 2024-11-07T14:17:34,539 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:34,539 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e., storeName=5a16e97064ea2ba83f416db90324fc7e/B, priority=13, startTime=1730989054338; duration=0sec 2024-11-07T14:17:34,539 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:17:34,539 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:34,539 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a16e97064ea2ba83f416db90324fc7e:B 2024-11-07T14:17:34,542 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:17:34,542 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): 5a16e97064ea2ba83f416db90324fc7e/C is initiating minor compaction (all files) 2024-11-07T14:17:34,542 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a16e97064ea2ba83f416db90324fc7e/C in TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
2024-11-07T14:17:34,543 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/a35997f314414218acc6247d02e434ca, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/44f145425e764e4ab656989d4140db3b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/79bfd461eb9e4e49a339530fa63c01d0] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp, totalSize=35.2 K 2024-11-07T14:17:34,544 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting a35997f314414218acc6247d02e434ca, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1730989049981 2024-11-07T14:17:34,544 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 44f145425e764e4ab656989d4140db3b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1730989050282 2024-11-07T14:17:34,545 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 79bfd461eb9e4e49a339530fa63c01d0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1730989052521 2024-11-07T14:17:34,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/552b54050d4647c5889b3c1978d53940 is 50, key is test_row_0/B:col10/1730989052936/Put/seqid=0 2024-11-07T14:17:34,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741851_1027 (size=12001) 2024-11-07T14:17:34,572 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/552b54050d4647c5889b3c1978d53940 2024-11-07T14:17:34,573 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a16e97064ea2ba83f416db90324fc7e#C#compaction#13 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:17:34,574 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/cebf6a695fae43fb8dda0ace61312de9 is 50, key is test_row_0/C:col10/1730989052521/Put/seqid=0 2024-11-07T14:17:34,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741852_1028 (size=12104) 2024-11-07T14:17:34,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/10edcd64df8343268dcc2e35529f938d is 50, key is test_row_0/C:col10/1730989052936/Put/seqid=0 2024-11-07T14:17:34,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741853_1029 (size=12001) 2024-11-07T14:17:34,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-07T14:17:35,024 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/cebf6a695fae43fb8dda0ace61312de9 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/cebf6a695fae43fb8dda0ace61312de9 2024-11-07T14:17:35,029 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/10edcd64df8343268dcc2e35529f938d 2024-11-07T14:17:35,041 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a16e97064ea2ba83f416db90324fc7e/C of 5a16e97064ea2ba83f416db90324fc7e into cebf6a695fae43fb8dda0ace61312de9(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:17:35,041 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:35,041 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e., storeName=5a16e97064ea2ba83f416db90324fc7e/C, priority=13, startTime=1730989054339; duration=0sec 2024-11-07T14:17:35,042 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:35,042 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a16e97064ea2ba83f416db90324fc7e:C 2024-11-07T14:17:35,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/360f07f8cb1b4079a2641ba99ab6cd1d as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/360f07f8cb1b4079a2641ba99ab6cd1d 2024-11-07T14:17:35,053 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/360f07f8cb1b4079a2641ba99ab6cd1d, entries=150, sequenceid=74, filesize=11.7 K 2024-11-07T14:17:35,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/552b54050d4647c5889b3c1978d53940 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/552b54050d4647c5889b3c1978d53940 2024-11-07T14:17:35,067 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/552b54050d4647c5889b3c1978d53940, entries=150, sequenceid=74, filesize=11.7 K 2024-11-07T14:17:35,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/10edcd64df8343268dcc2e35529f938d as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/10edcd64df8343268dcc2e35529f938d 2024-11-07T14:17:35,080 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/10edcd64df8343268dcc2e35529f938d, entries=150, sequenceid=74, filesize=11.7 K 2024-11-07T14:17:35,082 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=0 B/0 for 5a16e97064ea2ba83f416db90324fc7e in 672ms, sequenceid=74, compaction requested=false 2024-11-07T14:17:35,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:35,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:35,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-11-07T14:17:35,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-11-07T14:17:35,090 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-11-07T14:17:35,090 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 984 msec 2024-11-07T14:17:35,094 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 997 msec 2024-11-07T14:17:35,114 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a16e97064ea2ba83f416db90324fc7e 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-07T14:17:35,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:35,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=A 2024-11-07T14:17:35,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:35,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=B 2024-11-07T14:17:35,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:35,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=C 2024-11-07T14:17:35,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:35,136 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/1849150949354d9a8b8fc3dbd4623018 is 50, key is test_row_0/A:col10/1730989055114/Put/seqid=0 2024-11-07T14:17:35,175 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:35,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989115164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:35,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741854_1030 (size=21365) 2024-11-07T14:17:35,176 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:35,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989115166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:35,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:35,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989115168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:35,178 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:35,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989115168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:35,179 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:35,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989115169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:35,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-07T14:17:35,207 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-11-07T14:17:35,210 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:17:35,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-11-07T14:17:35,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-07T14:17:35,213 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:17:35,215 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:17:35,215 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:17:35,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:35,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989115277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:35,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:35,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989115279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:35,282 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:35,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989115279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:35,285 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:35,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989115280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:35,286 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:35,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989115281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:35,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-07T14:17:35,368 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:35,369 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-07T14:17:35,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:35,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. as already flushing 2024-11-07T14:17:35,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:35,370 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:35,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:35,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:35,486 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:35,487 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:35,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989115485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:35,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989115484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:35,489 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:35,494 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:35,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989115489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:35,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989115486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:35,495 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:35,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989115490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:35,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-07T14:17:35,523 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:35,525 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-07T14:17:35,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:35,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. as already flushing 2024-11-07T14:17:35,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:35,526 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
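[Editor's note: the surrounding entries show two related effects of the memstore filling faster than it can be flushed: writers receive RegionTooBusyException ("Over memstore limit=512.0 K") from HRegion.checkResources, and the re-dispatched FlushRegionProcedure (pid=17) fails with "Unable to complete flush ... as already flushing" until the in-flight flush finishes. As an illustration only, here is a minimal Java sketch of a client-side put with backoff on that exception; the row, family and qualifier mirror the keys in the log (test_row_0, A:col10), while the value, retry count and backoff are hypothetical. Note the stock HBase client normally retries RegionTooBusyException internally, and depending on client version the exception may surface wrapped rather than directly, so treat this as a sketch rather than required application code.]

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoffExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            // Family A, qualifier col10 as in the log; the value is a placeholder.
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (RegionTooBusyException e) {
                    // The region's memstore is above its blocking limit; back off and retry,
                    // giving the flush recorded in the log time to drain the memstore.
                    if (attempt >= 5) {
                        throw e;
                    }
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}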
2024-11-07T14:17:35,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:35,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:35,575 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/1849150949354d9a8b8fc3dbd4623018 2024-11-07T14:17:35,617 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/f14d3fca9e994edda7b602f0e71a2647 is 50, key is test_row_0/B:col10/1730989055114/Put/seqid=0 2024-11-07T14:17:35,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741855_1031 (size=12001) 2024-11-07T14:17:35,650 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/f14d3fca9e994edda7b602f0e71a2647 2024-11-07T14:17:35,680 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:35,681 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-07T14:17:35,681 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/a49d339e4f3745d2833f0b8a2fad9861 is 50, key is test_row_0/C:col10/1730989055114/Put/seqid=0 2024-11-07T14:17:35,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:35,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. as already flushing 2024-11-07T14:17:35,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:35,683 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:35,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:35,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:35,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741856_1032 (size=12001) 2024-11-07T14:17:35,799 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:35,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989115790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:35,800 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:35,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989115791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:35,802 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:35,803 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:35,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989115798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:35,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989115799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:35,803 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:35,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989115799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:35,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-07T14:17:35,836 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:35,837 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-07T14:17:35,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:35,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. as already flushing 2024-11-07T14:17:35,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:35,838 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:35,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:35,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:35,991 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:35,992 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-07T14:17:35,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:35,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. as already flushing 2024-11-07T14:17:35,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:35,993 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:35,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:35,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:36,120 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/a49d339e4f3745d2833f0b8a2fad9861 2024-11-07T14:17:36,134 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/1849150949354d9a8b8fc3dbd4623018 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/1849150949354d9a8b8fc3dbd4623018 2024-11-07T14:17:36,149 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:36,151 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-07T14:17:36,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:36,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
as already flushing 2024-11-07T14:17:36,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:36,152 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:36,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:36,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:36,155 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/1849150949354d9a8b8fc3dbd4623018, entries=350, sequenceid=90, filesize=20.9 K 2024-11-07T14:17:36,159 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/f14d3fca9e994edda7b602f0e71a2647 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/f14d3fca9e994edda7b602f0e71a2647 2024-11-07T14:17:36,181 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/f14d3fca9e994edda7b602f0e71a2647, entries=150, sequenceid=90, filesize=11.7 K 2024-11-07T14:17:36,185 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/a49d339e4f3745d2833f0b8a2fad9861 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/a49d339e4f3745d2833f0b8a2fad9861 2024-11-07T14:17:36,197 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/a49d339e4f3745d2833f0b8a2fad9861, entries=150, sequenceid=90, filesize=11.7 K 2024-11-07T14:17:36,200 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 5a16e97064ea2ba83f416db90324fc7e in 1086ms, sequenceid=90, compaction requested=true 2024-11-07T14:17:36,200 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:36,200 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:17:36,203 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 45470 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:17:36,203 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): 5a16e97064ea2ba83f416db90324fc7e/A is initiating minor compaction (all files) 2024-11-07T14:17:36,203 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a16e97064ea2ba83f416db90324fc7e/A in TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:36,203 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/db00db09b197479daee8b01310fa4086, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/360f07f8cb1b4079a2641ba99ab6cd1d, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/1849150949354d9a8b8fc3dbd4623018] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp, totalSize=44.4 K 2024-11-07T14:17:36,204 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting db00db09b197479daee8b01310fa4086, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1730989052521 2024-11-07T14:17:36,205 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 360f07f8cb1b4079a2641ba99ab6cd1d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1730989052919 2024-11-07T14:17:36,206 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1849150949354d9a8b8fc3dbd4623018, keycount=350, bloomtype=ROW, size=20.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1730989055088 2024-11-07T14:17:36,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a16e97064ea2ba83f416db90324fc7e:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:17:36,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:36,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a16e97064ea2ba83f416db90324fc7e:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:17:36,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:17:36,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a16e97064ea2ba83f416db90324fc7e:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:17:36,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:17:36,223 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:17:36,226 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a16e97064ea2ba83f416db90324fc7e#A#compaction#18 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:17:36,227 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/f516f1914b1a45b5a90e660b852f9549 is 50, key is test_row_0/A:col10/1730989055114/Put/seqid=0 2024-11-07T14:17:36,231 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:17:36,231 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 5a16e97064ea2ba83f416db90324fc7e/B is initiating minor compaction (all files) 2024-11-07T14:17:36,232 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a16e97064ea2ba83f416db90324fc7e/B in TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
2024-11-07T14:17:36,232 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/39eb218744e3469f9e35127787601ebc, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/552b54050d4647c5889b3c1978d53940, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/f14d3fca9e994edda7b602f0e71a2647] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp, totalSize=35.3 K 2024-11-07T14:17:36,233 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 39eb218744e3469f9e35127787601ebc, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1730989052521 2024-11-07T14:17:36,234 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 552b54050d4647c5889b3c1978d53940, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1730989052919 2024-11-07T14:17:36,235 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting f14d3fca9e994edda7b602f0e71a2647, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1730989055110 2024-11-07T14:17:36,283 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a16e97064ea2ba83f416db90324fc7e#B#compaction#19 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:17:36,285 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/94f77e31d5e34141ad44250912ba8c75 is 50, key is test_row_0/B:col10/1730989055114/Put/seqid=0 2024-11-07T14:17:36,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741857_1033 (size=12207) 2024-11-07T14:17:36,307 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:36,309 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-07T14:17:36,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
2024-11-07T14:17:36,309 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 5a16e97064ea2ba83f416db90324fc7e 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-07T14:17:36,310 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/f516f1914b1a45b5a90e660b852f9549 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/f516f1914b1a45b5a90e660b852f9549 2024-11-07T14:17:36,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:36,311 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. as already flushing 2024-11-07T14:17:36,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=A 2024-11-07T14:17:36,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:36,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=B 2024-11-07T14:17:36,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:36,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=C 2024-11-07T14:17:36,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:36,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-07T14:17:36,325 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a16e97064ea2ba83f416db90324fc7e/A of 5a16e97064ea2ba83f416db90324fc7e into f516f1914b1a45b5a90e660b852f9549(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:17:36,326 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:36,326 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e., storeName=5a16e97064ea2ba83f416db90324fc7e/A, priority=13, startTime=1730989056200; duration=0sec 2024-11-07T14:17:36,327 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:17:36,327 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a16e97064ea2ba83f416db90324fc7e:A 2024-11-07T14:17:36,327 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:17:36,330 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:17:36,330 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): 5a16e97064ea2ba83f416db90324fc7e/C is initiating minor compaction (all files) 2024-11-07T14:17:36,330 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a16e97064ea2ba83f416db90324fc7e/C in TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:36,330 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/cebf6a695fae43fb8dda0ace61312de9, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/10edcd64df8343268dcc2e35529f938d, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/a49d339e4f3745d2833f0b8a2fad9861] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp, totalSize=35.3 K 2024-11-07T14:17:36,331 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting cebf6a695fae43fb8dda0ace61312de9, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1730989052521 2024-11-07T14:17:36,332 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 10edcd64df8343268dcc2e35529f938d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1730989052919 2024-11-07T14:17:36,336 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting a49d339e4f3745d2833f0b8a2fad9861, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1730989055110 2024-11-07T14:17:36,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:34705 is added to blk_1073741858_1034 (size=12207) 2024-11-07T14:17:36,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:36,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989116332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:36,342 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:36,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989116333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:36,344 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:36,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989116336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:36,344 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:36,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989116339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:36,345 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:36,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989116336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:36,353 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/94f77e31d5e34141ad44250912ba8c75 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/94f77e31d5e34141ad44250912ba8c75 2024-11-07T14:17:36,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/9e814490b2d64edf9db0b0fc00d1b4fa is 50, key is test_row_0/A:col10/1730989055161/Put/seqid=0 2024-11-07T14:17:36,367 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a16e97064ea2ba83f416db90324fc7e/B of 5a16e97064ea2ba83f416db90324fc7e into 94f77e31d5e34141ad44250912ba8c75(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:17:36,368 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:36,368 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e., storeName=5a16e97064ea2ba83f416db90324fc7e/B, priority=13, startTime=1730989056222; duration=0sec 2024-11-07T14:17:36,368 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:36,368 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a16e97064ea2ba83f416db90324fc7e:B 2024-11-07T14:17:36,378 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a16e97064ea2ba83f416db90324fc7e#C#compaction#21 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:17:36,379 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/f3aa893977a7471bb00db3d47dc24f2f is 50, key is test_row_0/C:col10/1730989055114/Put/seqid=0 2024-11-07T14:17:36,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741860_1036 (size=12207) 2024-11-07T14:17:36,446 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/f3aa893977a7471bb00db3d47dc24f2f as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/f3aa893977a7471bb00db3d47dc24f2f 2024-11-07T14:17:36,447 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:36,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989116443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:36,448 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:36,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989116444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:36,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741859_1035 (size=12001) 2024-11-07T14:17:36,451 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:36,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989116446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:36,452 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:36,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989116446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:36,453 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:36,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989116447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:36,458 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a16e97064ea2ba83f416db90324fc7e/C of 5a16e97064ea2ba83f416db90324fc7e into f3aa893977a7471bb00db3d47dc24f2f(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:17:36,458 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:36,458 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e., storeName=5a16e97064ea2ba83f416db90324fc7e/C, priority=13, startTime=1730989056223; duration=0sec 2024-11-07T14:17:36,459 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:36,459 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a16e97064ea2ba83f416db90324fc7e:C 2024-11-07T14:17:36,651 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:36,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989116651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:36,653 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:36,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989116653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:36,657 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:36,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989116656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:36,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:36,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989116656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:36,658 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:36,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989116656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:36,852 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/9e814490b2d64edf9db0b0fc00d1b4fa 2024-11-07T14:17:36,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/24b34518526944dea362474412cec540 is 50, key is test_row_0/B:col10/1730989055161/Put/seqid=0 2024-11-07T14:17:36,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741861_1037 (size=12001) 2024-11-07T14:17:36,920 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/24b34518526944dea362474412cec540 2024-11-07T14:17:36,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/377e01d97702478eaecaf85d8ff5aeef is 50, key is 
test_row_0/C:col10/1730989055161/Put/seqid=0 2024-11-07T14:17:36,956 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:36,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989116954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:36,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:36,963 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:36,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989116960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:36,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989116960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:36,964 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:36,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989116962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:36,964 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:36,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989116962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:36,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741862_1038 (size=12001) 2024-11-07T14:17:36,999 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/377e01d97702478eaecaf85d8ff5aeef 2024-11-07T14:17:37,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/9e814490b2d64edf9db0b0fc00d1b4fa as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/9e814490b2d64edf9db0b0fc00d1b4fa 2024-11-07T14:17:37,020 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/9e814490b2d64edf9db0b0fc00d1b4fa, entries=150, sequenceid=115, filesize=11.7 K 2024-11-07T14:17:37,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/24b34518526944dea362474412cec540 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/24b34518526944dea362474412cec540 2024-11-07T14:17:37,036 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/24b34518526944dea362474412cec540, entries=150, sequenceid=115, filesize=11.7 K 2024-11-07T14:17:37,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/377e01d97702478eaecaf85d8ff5aeef as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/377e01d97702478eaecaf85d8ff5aeef 2024-11-07T14:17:37,059 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/377e01d97702478eaecaf85d8ff5aeef, entries=150, sequenceid=115, filesize=11.7 K 2024-11-07T14:17:37,060 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 5a16e97064ea2ba83f416db90324fc7e in 751ms, sequenceid=115, compaction requested=false 2024-11-07T14:17:37,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:37,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:37,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-07T14:17:37,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-11-07T14:17:37,065 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-11-07T14:17:37,066 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8480 sec 2024-11-07T14:17:37,080 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 1.8560 sec 2024-11-07T14:17:37,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-07T14:17:37,320 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-11-07T14:17:37,339 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:17:37,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-11-07T14:17:37,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-07T14:17:37,348 INFO [PEWorker-2 {}] 
procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:17:37,349 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:17:37,350 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:17:37,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-07T14:17:37,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:37,463 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a16e97064ea2ba83f416db90324fc7e 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-07T14:17:37,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=A 2024-11-07T14:17:37,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:37,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=B 2024-11-07T14:17:37,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:37,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=C 2024-11-07T14:17:37,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:37,488 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/5d26bd7599674cadaf6d316b5a3e1d7a is 50, key is test_row_0/A:col10/1730989056337/Put/seqid=0 2024-11-07T14:17:37,503 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:37,504 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-07T14:17:37,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:37,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
as already flushing 2024-11-07T14:17:37,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:37,505 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:37,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:37,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:37,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741863_1039 (size=12051) 2024-11-07T14:17:37,521 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/5d26bd7599674cadaf6d316b5a3e1d7a 2024-11-07T14:17:37,538 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/e93f2df5ec7b41948934bce0a44bd4d9 is 50, key is test_row_0/B:col10/1730989056337/Put/seqid=0 2024-11-07T14:17:37,546 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:37,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989117541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:37,547 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:37,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989117541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:37,547 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:37,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989117542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:37,548 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:37,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989117543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:37,549 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:37,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989117544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:37,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741864_1040 (size=12051) 2024-11-07T14:17:37,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-07T14:17:37,649 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:37,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989117649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:37,650 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:37,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989117650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:37,652 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:37,652 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:37,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989117652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:37,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989117651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:37,653 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:37,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989117652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:37,658 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:37,661 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-07T14:17:37,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:37,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. as already flushing 2024-11-07T14:17:37,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:37,661 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:37,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:37,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:37,815 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:37,816 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-07T14:17:37,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:37,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. as already flushing 2024-11-07T14:17:37,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:37,817 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:37,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:37,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:37,855 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:37,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989117853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:37,856 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:37,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989117854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:37,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:37,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989117855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:37,857 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:37,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989117856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:37,857 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:37,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989117857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:37,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-07T14:17:37,968 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/e93f2df5ec7b41948934bce0a44bd4d9 2024-11-07T14:17:37,970 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:37,972 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-07T14:17:37,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:37,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. as already flushing 2024-11-07T14:17:37,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
2024-11-07T14:17:37,973 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:37,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:37,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:38,004 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/4290451b829d4da8b2a58a1353c5df96 is 50, key is test_row_0/C:col10/1730989056337/Put/seqid=0 2024-11-07T14:17:38,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741865_1041 (size=12051) 2024-11-07T14:17:38,042 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/4290451b829d4da8b2a58a1353c5df96 2024-11-07T14:17:38,055 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/5d26bd7599674cadaf6d316b5a3e1d7a as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/5d26bd7599674cadaf6d316b5a3e1d7a 2024-11-07T14:17:38,066 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/5d26bd7599674cadaf6d316b5a3e1d7a, entries=150, sequenceid=130, filesize=11.8 K 2024-11-07T14:17:38,068 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/e93f2df5ec7b41948934bce0a44bd4d9 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/e93f2df5ec7b41948934bce0a44bd4d9 2024-11-07T14:17:38,078 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/e93f2df5ec7b41948934bce0a44bd4d9, entries=150, sequenceid=130, filesize=11.8 K 2024-11-07T14:17:38,080 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/4290451b829d4da8b2a58a1353c5df96 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/4290451b829d4da8b2a58a1353c5df96 2024-11-07T14:17:38,098 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/4290451b829d4da8b2a58a1353c5df96, entries=150, sequenceid=130, filesize=11.8 K 2024-11-07T14:17:38,100 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 5a16e97064ea2ba83f416db90324fc7e in 638ms, sequenceid=130, compaction requested=true 2024-11-07T14:17:38,100 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:38,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a16e97064ea2ba83f416db90324fc7e:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:17:38,100 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:17:38,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:38,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a16e97064ea2ba83f416db90324fc7e:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:17:38,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:38,101 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:17:38,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a16e97064ea2ba83f416db90324fc7e:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:17:38,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:17:38,103 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:17:38,103 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): 5a16e97064ea2ba83f416db90324fc7e/A is initiating minor compaction (all files) 2024-11-07T14:17:38,103 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:17:38,103 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a16e97064ea2ba83f416db90324fc7e/A in TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:38,103 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 5a16e97064ea2ba83f416db90324fc7e/B is initiating minor compaction (all files) 2024-11-07T14:17:38,104 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/f516f1914b1a45b5a90e660b852f9549, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/9e814490b2d64edf9db0b0fc00d1b4fa, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/5d26bd7599674cadaf6d316b5a3e1d7a] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp, totalSize=35.4 K 2024-11-07T14:17:38,104 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a16e97064ea2ba83f416db90324fc7e/B in TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
2024-11-07T14:17:38,104 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/94f77e31d5e34141ad44250912ba8c75, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/24b34518526944dea362474412cec540, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/e93f2df5ec7b41948934bce0a44bd4d9] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp, totalSize=35.4 K 2024-11-07T14:17:38,104 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting f516f1914b1a45b5a90e660b852f9549, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1730989055110 2024-11-07T14:17:38,105 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e814490b2d64edf9db0b0fc00d1b4fa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1730989055161 2024-11-07T14:17:38,106 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 94f77e31d5e34141ad44250912ba8c75, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1730989055110 2024-11-07T14:17:38,107 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5d26bd7599674cadaf6d316b5a3e1d7a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1730989056334 2024-11-07T14:17:38,107 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 24b34518526944dea362474412cec540, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1730989055161 2024-11-07T14:17:38,108 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting e93f2df5ec7b41948934bce0a44bd4d9, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1730989056334 2024-11-07T14:17:38,128 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:38,129 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-07T14:17:38,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
2024-11-07T14:17:38,129 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 5a16e97064ea2ba83f416db90324fc7e 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-07T14:17:38,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=A 2024-11-07T14:17:38,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:38,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=B 2024-11-07T14:17:38,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:38,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=C 2024-11-07T14:17:38,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:38,137 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a16e97064ea2ba83f416db90324fc7e#B#compaction#27 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:17:38,138 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/33e17e4d37d049a5a54f8363981365db is 50, key is test_row_0/B:col10/1730989056337/Put/seqid=0 2024-11-07T14:17:38,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/eb692dd642c74d19832d657e23c8df74 is 50, key is test_row_0/A:col10/1730989057541/Put/seqid=0 2024-11-07T14:17:38,162 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a16e97064ea2ba83f416db90324fc7e#A#compaction#29 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:17:38,163 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/436981c5ec2f41aeae45d16137f26500 is 50, key is test_row_0/A:col10/1730989056337/Put/seqid=0 2024-11-07T14:17:38,170 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
as already flushing 2024-11-07T14:17:38,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:38,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741866_1042 (size=12359) 2024-11-07T14:17:38,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741867_1043 (size=12151) 2024-11-07T14:17:38,190 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/33e17e4d37d049a5a54f8363981365db as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/33e17e4d37d049a5a54f8363981365db 2024-11-07T14:17:38,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741868_1044 (size=12359) 2024-11-07T14:17:38,206 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/436981c5ec2f41aeae45d16137f26500 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/436981c5ec2f41aeae45d16137f26500 2024-11-07T14:17:38,232 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a16e97064ea2ba83f416db90324fc7e/A of 5a16e97064ea2ba83f416db90324fc7e into 436981c5ec2f41aeae45d16137f26500(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:17:38,232 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a16e97064ea2ba83f416db90324fc7e/B of 5a16e97064ea2ba83f416db90324fc7e into 33e17e4d37d049a5a54f8363981365db(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:17:38,233 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:38,233 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:38,233 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e., storeName=5a16e97064ea2ba83f416db90324fc7e/A, priority=13, startTime=1730989058100; duration=0sec 2024-11-07T14:17:38,233 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e., storeName=5a16e97064ea2ba83f416db90324fc7e/B, priority=13, startTime=1730989058101; duration=0sec 2024-11-07T14:17:38,233 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:17:38,233 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a16e97064ea2ba83f416db90324fc7e:A 2024-11-07T14:17:38,233 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:17:38,234 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:38,234 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a16e97064ea2ba83f416db90324fc7e:B 2024-11-07T14:17:38,236 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:17:38,236 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): 5a16e97064ea2ba83f416db90324fc7e/C is initiating minor compaction (all files) 2024-11-07T14:17:38,236 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a16e97064ea2ba83f416db90324fc7e/C in TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
2024-11-07T14:17:38,236 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/f3aa893977a7471bb00db3d47dc24f2f, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/377e01d97702478eaecaf85d8ff5aeef, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/4290451b829d4da8b2a58a1353c5df96] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp, totalSize=35.4 K 2024-11-07T14:17:38,237 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting f3aa893977a7471bb00db3d47dc24f2f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1730989055110 2024-11-07T14:17:38,237 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 377e01d97702478eaecaf85d8ff5aeef, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1730989055161 2024-11-07T14:17:38,238 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4290451b829d4da8b2a58a1353c5df96, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1730989056334 2024-11-07T14:17:38,249 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:38,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989118239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:38,252 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:38,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989118246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:38,253 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:38,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989118246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:38,254 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:38,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989118249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:38,258 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:38,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989118250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:38,264 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a16e97064ea2ba83f416db90324fc7e#C#compaction#30 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:17:38,265 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/b1c13821730149fc88e22749e21978f9 is 50, key is test_row_0/C:col10/1730989056337/Put/seqid=0 2024-11-07T14:17:38,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741869_1045 (size=12359) 2024-11-07T14:17:38,352 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:38,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989118351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:38,356 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:38,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989118354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:38,357 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:38,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989118355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:38,360 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:38,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989118360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:38,361 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:38,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989118359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:38,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-07T14:17:38,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:38,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989118555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:38,562 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:38,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989118559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:38,562 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:38,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989118560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:38,569 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:38,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989118566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:38,570 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:38,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989118566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:38,594 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/eb692dd642c74d19832d657e23c8df74 2024-11-07T14:17:38,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/212b74391879409bba3aca3671ef9f1e is 50, key is test_row_0/B:col10/1730989057541/Put/seqid=0 2024-11-07T14:17:38,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741870_1046 (size=12151) 2024-11-07T14:17:38,649 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/212b74391879409bba3aca3671ef9f1e 2024-11-07T14:17:38,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/56f8ef401b7f4ebb9d5957edda0c752c is 50, key is 
test_row_0/C:col10/1730989057541/Put/seqid=0 2024-11-07T14:17:38,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741871_1047 (size=12151) 2024-11-07T14:17:38,720 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/56f8ef401b7f4ebb9d5957edda0c752c 2024-11-07T14:17:38,734 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/b1c13821730149fc88e22749e21978f9 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/b1c13821730149fc88e22749e21978f9 2024-11-07T14:17:38,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/eb692dd642c74d19832d657e23c8df74 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/eb692dd642c74d19832d657e23c8df74 2024-11-07T14:17:38,745 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a16e97064ea2ba83f416db90324fc7e/C of 5a16e97064ea2ba83f416db90324fc7e into b1c13821730149fc88e22749e21978f9(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:17:38,745 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:38,745 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e., storeName=5a16e97064ea2ba83f416db90324fc7e/C, priority=13, startTime=1730989058101; duration=0sec 2024-11-07T14:17:38,746 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:38,746 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a16e97064ea2ba83f416db90324fc7e:C 2024-11-07T14:17:38,751 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/eb692dd642c74d19832d657e23c8df74, entries=150, sequenceid=155, filesize=11.9 K 2024-11-07T14:17:38,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/212b74391879409bba3aca3671ef9f1e as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/212b74391879409bba3aca3671ef9f1e 2024-11-07T14:17:38,769 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/212b74391879409bba3aca3671ef9f1e, entries=150, sequenceid=155, filesize=11.9 K 2024-11-07T14:17:38,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/56f8ef401b7f4ebb9d5957edda0c752c as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/56f8ef401b7f4ebb9d5957edda0c752c 2024-11-07T14:17:38,781 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/56f8ef401b7f4ebb9d5957edda0c752c, entries=150, sequenceid=155, filesize=11.9 K 2024-11-07T14:17:38,783 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 5a16e97064ea2ba83f416db90324fc7e in 654ms, sequenceid=155, compaction requested=false 2024-11-07T14:17:38,784 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:38,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:38,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-11-07T14:17:38,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-11-07T14:17:38,789 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-11-07T14:17:38,789 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4370 sec 2024-11-07T14:17:38,791 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 1.4500 sec 2024-11-07T14:17:38,871 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a16e97064ea2ba83f416db90324fc7e 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-07T14:17:38,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:38,873 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=A 2024-11-07T14:17:38,873 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:38,873 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=B 2024-11-07T14:17:38,874 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:38,874 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=C 2024-11-07T14:17:38,874 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:38,882 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/bfedfb975a224805a0a26a0f9741f89f is 50, key is test_row_0/A:col10/1730989058868/Put/seqid=0 2024-11-07T14:17:38,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741872_1048 (size=12151) 2024-11-07T14:17:38,908 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/bfedfb975a224805a0a26a0f9741f89f 2024-11-07T14:17:38,923 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/174e4565fa204e24b2775b8329c4a61d is 50, key is test_row_0/B:col10/1730989058868/Put/seqid=0 2024-11-07T14:17:38,923 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:38,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989118915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:38,928 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:38,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989118918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:38,932 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:38,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989118921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:38,937 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:38,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989118928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:38,935 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:38,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989118925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:38,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741873_1049 (size=12151) 2024-11-07T14:17:39,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:39,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989119027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:39,032 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:39,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989119030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:39,041 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:39,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989119039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:39,042 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:39,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989119039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:39,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:39,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989119050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:39,235 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:39,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989119233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:39,240 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:39,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989119235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:39,245 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:39,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989119244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:39,248 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:39,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989119245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:39,254 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:39,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989119253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:39,394 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/174e4565fa204e24b2775b8329c4a61d 2024-11-07T14:17:39,434 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/f5454a88e7f547babfc65fd29fe64547 is 50, key is test_row_0/C:col10/1730989058868/Put/seqid=0 2024-11-07T14:17:39,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-07T14:17:39,458 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-11-07T14:17:39,460 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:17:39,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-11-07T14:17:39,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-07T14:17:39,469 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:17:39,472 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:17:39,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741874_1050 (size=12151) 2024-11-07T14:17:39,472 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-11-07T14:17:39,476 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/f5454a88e7f547babfc65fd29fe64547 2024-11-07T14:17:39,488 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/bfedfb975a224805a0a26a0f9741f89f as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/bfedfb975a224805a0a26a0f9741f89f 2024-11-07T14:17:39,498 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/bfedfb975a224805a0a26a0f9741f89f, entries=150, sequenceid=172, filesize=11.9 K 2024-11-07T14:17:39,502 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/174e4565fa204e24b2775b8329c4a61d as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/174e4565fa204e24b2775b8329c4a61d 2024-11-07T14:17:39,513 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/174e4565fa204e24b2775b8329c4a61d, entries=150, sequenceid=172, filesize=11.9 K 2024-11-07T14:17:39,514 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/f5454a88e7f547babfc65fd29fe64547 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/f5454a88e7f547babfc65fd29fe64547 2024-11-07T14:17:39,531 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/f5454a88e7f547babfc65fd29fe64547, entries=150, sequenceid=172, filesize=11.9 K 2024-11-07T14:17:39,535 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=140.89 KB/144270 for 5a16e97064ea2ba83f416db90324fc7e in 664ms, sequenceid=172, compaction requested=true 2024-11-07T14:17:39,535 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:39,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a16e97064ea2ba83f416db90324fc7e:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:17:39,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:39,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a16e97064ea2ba83f416db90324fc7e:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:17:39,535 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:17:39,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:39,535 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:17:39,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a16e97064ea2ba83f416db90324fc7e:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:17:39,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:17:39,539 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:17:39,539 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 5a16e97064ea2ba83f416db90324fc7e/B is initiating minor compaction (all files) 2024-11-07T14:17:39,539 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a16e97064ea2ba83f416db90324fc7e/B in TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:39,539 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/33e17e4d37d049a5a54f8363981365db, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/212b74391879409bba3aca3671ef9f1e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/174e4565fa204e24b2775b8329c4a61d] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp, totalSize=35.8 K 2024-11-07T14:17:39,542 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:17:39,542 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): 5a16e97064ea2ba83f416db90324fc7e/A is initiating minor compaction (all files) 2024-11-07T14:17:39,542 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a16e97064ea2ba83f416db90324fc7e/A in TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
2024-11-07T14:17:39,542 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/436981c5ec2f41aeae45d16137f26500, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/eb692dd642c74d19832d657e23c8df74, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/bfedfb975a224805a0a26a0f9741f89f] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp, totalSize=35.8 K 2024-11-07T14:17:39,543 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 33e17e4d37d049a5a54f8363981365db, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1730989056334 2024-11-07T14:17:39,543 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 436981c5ec2f41aeae45d16137f26500, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1730989056334 2024-11-07T14:17:39,545 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 212b74391879409bba3aca3671ef9f1e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1730989057516 2024-11-07T14:17:39,545 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting eb692dd642c74d19832d657e23c8df74, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1730989057516 2024-11-07T14:17:39,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:39,546 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 174e4565fa204e24b2775b8329c4a61d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1730989058246 2024-11-07T14:17:39,547 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting bfedfb975a224805a0a26a0f9741f89f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1730989058246 2024-11-07T14:17:39,550 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a16e97064ea2ba83f416db90324fc7e 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-07T14:17:39,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=A 2024-11-07T14:17:39,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:39,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=B 2024-11-07T14:17:39,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:39,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=C 
2024-11-07T14:17:39,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:39,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-07T14:17:39,573 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:39,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989119568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:39,574 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:39,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989119569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:39,576 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a16e97064ea2ba83f416db90324fc7e#B#compaction#36 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:17:39,577 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/731a0687a4a941e89f109c3d51308e75 is 50, key is test_row_0/B:col10/1730989058868/Put/seqid=0 2024-11-07T14:17:39,577 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:39,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989119571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:39,578 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a16e97064ea2ba83f416db90324fc7e#A#compaction#37 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:17:39,578 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/69ae0789e6fb4814aec127e1ffefd57c is 50, key is test_row_0/A:col10/1730989058868/Put/seqid=0 2024-11-07T14:17:39,578 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:39,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989119571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:39,579 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:39,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989119572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:39,582 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/3248e79ccd4b4c2d90e498a668d2ec5b is 50, key is test_row_0/A:col10/1730989059547/Put/seqid=0 2024-11-07T14:17:39,625 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:39,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741876_1052 (size=12561) 2024-11-07T14:17:39,627 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-07T14:17:39,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:39,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. as already flushing 2024-11-07T14:17:39,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:39,628 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:39,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:39,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:39,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741875_1051 (size=12561) 2024-11-07T14:17:39,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741877_1053 (size=12151) 2024-11-07T14:17:39,656 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/3248e79ccd4b4c2d90e498a668d2ec5b 2024-11-07T14:17:39,667 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/69ae0789e6fb4814aec127e1ffefd57c as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/69ae0789e6fb4814aec127e1ffefd57c 2024-11-07T14:17:39,668 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/731a0687a4a941e89f109c3d51308e75 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/731a0687a4a941e89f109c3d51308e75 2024-11-07T14:17:39,673 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/dbb6e569365f47e29e2f13e59d5eb66e is 50, key is test_row_0/B:col10/1730989059547/Put/seqid=0 2024-11-07T14:17:39,676 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:39,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989119675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:39,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:39,679 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a16e97064ea2ba83f416db90324fc7e/A of 5a16e97064ea2ba83f416db90324fc7e into 69ae0789e6fb4814aec127e1ffefd57c(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:17:39,679 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:39,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989119675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:39,679 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e., storeName=5a16e97064ea2ba83f416db90324fc7e/A, priority=13, startTime=1730989059535; duration=0sec 2024-11-07T14:17:39,679 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:17:39,681 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a16e97064ea2ba83f416db90324fc7e:A 2024-11-07T14:17:39,682 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:17:39,683 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:17:39,684 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): 5a16e97064ea2ba83f416db90324fc7e/C is initiating minor compaction (all files) 2024-11-07T14:17:39,684 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a16e97064ea2ba83f416db90324fc7e/C in TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:39,684 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a16e97064ea2ba83f416db90324fc7e/B of 5a16e97064ea2ba83f416db90324fc7e into 731a0687a4a941e89f109c3d51308e75(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:17:39,684 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/b1c13821730149fc88e22749e21978f9, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/56f8ef401b7f4ebb9d5957edda0c752c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/f5454a88e7f547babfc65fd29fe64547] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp, totalSize=35.8 K 2024-11-07T14:17:39,684 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:39,684 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e., storeName=5a16e97064ea2ba83f416db90324fc7e/B, priority=13, startTime=1730989059535; duration=0sec 2024-11-07T14:17:39,684 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:39,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989119680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:39,685 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:39,685 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a16e97064ea2ba83f416db90324fc7e:B 2024-11-07T14:17:39,685 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting b1c13821730149fc88e22749e21978f9, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1730989056334 2024-11-07T14:17:39,685 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:39,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989119681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:39,685 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:39,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989119682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:39,686 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 56f8ef401b7f4ebb9d5957edda0c752c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1730989057516 2024-11-07T14:17:39,687 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting f5454a88e7f547babfc65fd29fe64547, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1730989058246 2024-11-07T14:17:39,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741878_1054 (size=12151) 2024-11-07T14:17:39,717 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a16e97064ea2ba83f416db90324fc7e#C#compaction#40 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:17:39,718 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/db84c37e775a4fa19f9f8390fa8aee77 is 50, key is test_row_0/C:col10/1730989058868/Put/seqid=0 2024-11-07T14:17:39,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741879_1055 (size=12561) 2024-11-07T14:17:39,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-07T14:17:39,782 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:39,783 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-07T14:17:39,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
2024-11-07T14:17:39,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. as already flushing 2024-11-07T14:17:39,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:39,783 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:39,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:17:39,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:39,791 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/db84c37e775a4fa19f9f8390fa8aee77 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/db84c37e775a4fa19f9f8390fa8aee77 2024-11-07T14:17:39,814 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a16e97064ea2ba83f416db90324fc7e/C of 5a16e97064ea2ba83f416db90324fc7e into db84c37e775a4fa19f9f8390fa8aee77(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:17:39,814 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:39,814 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e., storeName=5a16e97064ea2ba83f416db90324fc7e/C, priority=13, startTime=1730989059535; duration=0sec 2024-11-07T14:17:39,814 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:39,814 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a16e97064ea2ba83f416db90324fc7e:C 2024-11-07T14:17:39,881 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:39,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989119881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:39,882 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:39,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989119881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:39,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:39,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989119886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:39,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:39,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989119888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:39,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:39,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989119889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:39,937 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:39,938 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-07T14:17:39,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:39,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. as already flushing 2024-11-07T14:17:39,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:39,938 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:17:39,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:39,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:40,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-07T14:17:40,092 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:40,093 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-07T14:17:40,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:40,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. as already flushing 2024-11-07T14:17:40,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:40,093 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:40,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:40,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:17:40,115 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/dbb6e569365f47e29e2f13e59d5eb66e 2024-11-07T14:17:40,143 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/ef05161a23db4647bde0c6be876f68e9 is 50, key is test_row_0/C:col10/1730989059547/Put/seqid=0 2024-11-07T14:17:40,187 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:40,187 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:40,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989120186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:40,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989120187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:40,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:40,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989120189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:40,194 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:40,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989120193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:40,195 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:40,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989120193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:40,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741880_1056 (size=12151) 2024-11-07T14:17:40,199 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/ef05161a23db4647bde0c6be876f68e9 2024-11-07T14:17:40,216 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/3248e79ccd4b4c2d90e498a668d2ec5b as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/3248e79ccd4b4c2d90e498a668d2ec5b 2024-11-07T14:17:40,227 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/3248e79ccd4b4c2d90e498a668d2ec5b, entries=150, sequenceid=199, filesize=11.9 K 2024-11-07T14:17:40,231 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/dbb6e569365f47e29e2f13e59d5eb66e as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/dbb6e569365f47e29e2f13e59d5eb66e 2024-11-07T14:17:40,246 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/dbb6e569365f47e29e2f13e59d5eb66e, entries=150, sequenceid=199, filesize=11.9 K 2024-11-07T14:17:40,247 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:40,247 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-07T14:17:40,248 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:40,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. as already flushing 2024-11-07T14:17:40,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:40,248 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:40,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:17:40,248 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/ef05161a23db4647bde0c6be876f68e9 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/ef05161a23db4647bde0c6be876f68e9 2024-11-07T14:17:40,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:17:40,266 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/ef05161a23db4647bde0c6be876f68e9, entries=150, sequenceid=199, filesize=11.9 K 2024-11-07T14:17:40,269 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 5a16e97064ea2ba83f416db90324fc7e in 718ms, sequenceid=199, compaction requested=false 2024-11-07T14:17:40,269 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:40,402 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:40,402 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-07T14:17:40,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:40,403 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 5a16e97064ea2ba83f416db90324fc7e 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-07T14:17:40,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=A 2024-11-07T14:17:40,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:40,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=B 2024-11-07T14:17:40,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:40,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=C 2024-11-07T14:17:40,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:40,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/14d0c1425be244a39be7b29f8abf94fd is 50, key is test_row_0/A:col10/1730989059569/Put/seqid=0 2024-11-07T14:17:40,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741881_1057 (size=12151) 2024-11-07T14:17:40,572 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-07T14:17:40,694 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. as already flushing 2024-11-07T14:17:40,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:40,744 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:40,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989120728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:40,745 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:40,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989120730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:40,758 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:40,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989120745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:40,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:40,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989120761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:40,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:40,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989120761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:40,836 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/14d0c1425be244a39be7b29f8abf94fd 2024-11-07T14:17:40,861 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:40,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989120859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:40,863 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:40,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989120863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:40,864 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:40,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989120863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:40,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:40,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989120866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:40,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:40,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989120866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:40,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/4160d6f028044bb4998d37a44a3f7699 is 50, key is test_row_0/B:col10/1730989059569/Put/seqid=0 2024-11-07T14:17:40,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741882_1058 (size=12151) 2024-11-07T14:17:41,066 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:41,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989121065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:41,073 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:41,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989121072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:41,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:41,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989121076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:41,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:41,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989121077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:41,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:41,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989121078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:41,312 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/4160d6f028044bb4998d37a44a3f7699 2024-11-07T14:17:41,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/4d15a9d101444bffa9e535339b0a8c87 is 50, key is test_row_0/C:col10/1730989059569/Put/seqid=0 2024-11-07T14:17:41,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741883_1059 (size=12151) 2024-11-07T14:17:41,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:41,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989121369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:41,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:41,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989121379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:41,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:41,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989121385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:41,388 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:41,388 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-07T14:17:41,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989121386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081
2024-11-07T14:17:41,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989121386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081
2024-11-07T14:17:41,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20
2024-11-07T14:17:41,761 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/4d15a9d101444bffa9e535339b0a8c87
2024-11-07T14:17:41,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/14d0c1425be244a39be7b29f8abf94fd as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/14d0c1425be244a39be7b29f8abf94fd
2024-11-07T14:17:41,779 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/14d0c1425be244a39be7b29f8abf94fd, entries=150, sequenceid=212, filesize=11.9 K
2024-11-07T14:17:41,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/4160d6f028044bb4998d37a44a3f7699 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/4160d6f028044bb4998d37a44a3f7699
2024-11-07T14:17:41,789 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/4160d6f028044bb4998d37a44a3f7699, entries=150, sequenceid=212, filesize=11.9 K
2024-11-07T14:17:41,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/4d15a9d101444bffa9e535339b0a8c87 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/4d15a9d101444bffa9e535339b0a8c87
2024-11-07T14:17:41,799 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/4d15a9d101444bffa9e535339b0a8c87, entries=150, sequenceid=212, filesize=11.9 K
2024-11-07T14:17:41,801 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=167.72 KB/171750 for 5a16e97064ea2ba83f416db90324fc7e in 1398ms, sequenceid=212, compaction requested=true
2024-11-07T14:17:41,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 5a16e97064ea2ba83f416db90324fc7e:
2024-11-07T14:17:41,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.
2024-11-07T14:17:41,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-11-07T14:17:41,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-11-07T14:17:41,806 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-11-07T14:17:41,807 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3320 sec 2024-11-07T14:17:41,808 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 2.3470 sec 2024-11-07T14:17:41,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:41,884 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a16e97064ea2ba83f416db90324fc7e 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-07T14:17:41,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=A 2024-11-07T14:17:41,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:41,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=B 2024-11-07T14:17:41,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:41,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=C 2024-11-07T14:17:41,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:41,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:41,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989121890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:41,892 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/2a49e9645aff469f9ce928def847f888 is 50, key is test_row_0/A:col10/1730989060727/Put/seqid=0 2024-11-07T14:17:41,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:41,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989121894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:41,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:41,915 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:41,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989121912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:41,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989121911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:41,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:41,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989121913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:41,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741884_1060 (size=14541) 2024-11-07T14:17:41,936 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=241 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/2a49e9645aff469f9ce928def847f888 2024-11-07T14:17:41,948 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/eadb7bea8db44e56ba145ba6230c7d59 is 50, key is test_row_0/B:col10/1730989060727/Put/seqid=0 2024-11-07T14:17:41,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741885_1061 (size=12151) 2024-11-07T14:17:41,996 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:41,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989121994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:42,016 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:42,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989122016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:42,017 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:42,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989122017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:42,200 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:42,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989122198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:42,220 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:42,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989122219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:42,222 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:42,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989122221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:42,376 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=241 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/eadb7bea8db44e56ba145ba6230c7d59 2024-11-07T14:17:42,391 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/2c7eacd8ce8f4fd9a1a75701e0a89a4b is 50, key is test_row_0/C:col10/1730989060727/Put/seqid=0 2024-11-07T14:17:42,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741886_1062 (size=12151) 2024-11-07T14:17:42,403 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=241 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/2c7eacd8ce8f4fd9a1a75701e0a89a4b 2024-11-07T14:17:42,415 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/2a49e9645aff469f9ce928def847f888 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/2a49e9645aff469f9ce928def847f888 2024-11-07T14:17:42,424 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/2a49e9645aff469f9ce928def847f888, entries=200, sequenceid=241, filesize=14.2 K 2024-11-07T14:17:42,426 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/eadb7bea8db44e56ba145ba6230c7d59 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/eadb7bea8db44e56ba145ba6230c7d59 2024-11-07T14:17:42,436 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/eadb7bea8db44e56ba145ba6230c7d59, entries=150, sequenceid=241, filesize=11.9 K 2024-11-07T14:17:42,438 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/2c7eacd8ce8f4fd9a1a75701e0a89a4b as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/2c7eacd8ce8f4fd9a1a75701e0a89a4b 2024-11-07T14:17:42,452 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/2c7eacd8ce8f4fd9a1a75701e0a89a4b, entries=150, sequenceid=241, filesize=11.9 K 2024-11-07T14:17:42,455 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=33.54 KB/34350 for 5a16e97064ea2ba83f416db90324fc7e in 570ms, sequenceid=241, compaction requested=true 2024-11-07T14:17:42,455 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:42,455 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:17:42,456 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a16e97064ea2ba83f416db90324fc7e:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:17:42,456 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:42,456 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:17:42,458 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a16e97064ea2ba83f416db90324fc7e:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:17:42,458 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:17:42,459 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 5a16e97064ea2ba83f416db90324fc7e/B is initiating minor compaction (all files) 2024-11-07T14:17:42,459 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a16e97064ea2ba83f416db90324fc7e/B in TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
2024-11-07T14:17:42,459 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/731a0687a4a941e89f109c3d51308e75, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/dbb6e569365f47e29e2f13e59d5eb66e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/4160d6f028044bb4998d37a44a3f7699, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/eadb7bea8db44e56ba145ba6230c7d59] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp, totalSize=47.9 K 2024-11-07T14:17:42,458 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51404 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:17:42,459 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): 5a16e97064ea2ba83f416db90324fc7e/A is initiating minor compaction (all files) 2024-11-07T14:17:42,460 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a16e97064ea2ba83f416db90324fc7e/A in TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:42,460 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/69ae0789e6fb4814aec127e1ffefd57c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/3248e79ccd4b4c2d90e498a668d2ec5b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/14d0c1425be244a39be7b29f8abf94fd, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/2a49e9645aff469f9ce928def847f888] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp, totalSize=50.2 K 2024-11-07T14:17:42,460 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 731a0687a4a941e89f109c3d51308e75, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1730989058246 2024-11-07T14:17:42,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:42,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a16e97064ea2ba83f416db90324fc7e:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:17:42,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): 
Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:17:42,462 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 69ae0789e6fb4814aec127e1ffefd57c, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1730989058246 2024-11-07T14:17:42,462 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting dbb6e569365f47e29e2f13e59d5eb66e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1730989058919 2024-11-07T14:17:42,462 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 4160d6f028044bb4998d37a44a3f7699, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1730989059562 2024-11-07T14:17:42,462 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3248e79ccd4b4c2d90e498a668d2ec5b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1730989058919 2024-11-07T14:17:42,463 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting eadb7bea8db44e56ba145ba6230c7d59, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1730989060727 2024-11-07T14:17:42,463 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 14d0c1425be244a39be7b29f8abf94fd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1730989059562 2024-11-07T14:17:42,464 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2a49e9645aff469f9ce928def847f888, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1730989060727 2024-11-07T14:17:42,482 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a16e97064ea2ba83f416db90324fc7e#A#compaction#48 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:17:42,483 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/2dff8070276e4a1cb9b02044dd54b3d3 is 50, key is test_row_0/A:col10/1730989060727/Put/seqid=0 2024-11-07T14:17:42,487 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a16e97064ea2ba83f416db90324fc7e#B#compaction#49 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:17:42,488 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/42ead432e7ea49b18507cf7810db8d42 is 50, key is test_row_0/B:col10/1730989060727/Put/seqid=0 2024-11-07T14:17:42,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741887_1063 (size=12697) 2024-11-07T14:17:42,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741888_1064 (size=12697) 2024-11-07T14:17:42,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:42,519 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a16e97064ea2ba83f416db90324fc7e 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-07T14:17:42,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=A 2024-11-07T14:17:42,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:42,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=B 2024-11-07T14:17:42,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:42,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=C 2024-11-07T14:17:42,520 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/2dff8070276e4a1cb9b02044dd54b3d3 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/2dff8070276e4a1cb9b02044dd54b3d3 2024-11-07T14:17:42,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:42,529 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/42ead432e7ea49b18507cf7810db8d42 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/42ead432e7ea49b18507cf7810db8d42 2024-11-07T14:17:42,530 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/53b1d013bf1a48508c5aa4dc53717c6b is 50, key is test_row_0/A:col10/1730989062517/Put/seqid=0 2024-11-07T14:17:42,533 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 
5a16e97064ea2ba83f416db90324fc7e/A of 5a16e97064ea2ba83f416db90324fc7e into 2dff8070276e4a1cb9b02044dd54b3d3(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:17:42,533 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:42,533 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e., storeName=5a16e97064ea2ba83f416db90324fc7e/A, priority=12, startTime=1730989062455; duration=0sec 2024-11-07T14:17:42,534 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:17:42,534 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a16e97064ea2ba83f416db90324fc7e:A 2024-11-07T14:17:42,534 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:17:42,536 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:17:42,536 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): 5a16e97064ea2ba83f416db90324fc7e/C is initiating minor compaction (all files) 2024-11-07T14:17:42,536 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a16e97064ea2ba83f416db90324fc7e/C in TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
2024-11-07T14:17:42,537 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/db84c37e775a4fa19f9f8390fa8aee77, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/ef05161a23db4647bde0c6be876f68e9, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/4d15a9d101444bffa9e535339b0a8c87, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/2c7eacd8ce8f4fd9a1a75701e0a89a4b] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp, totalSize=47.9 K 2024-11-07T14:17:42,537 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting db84c37e775a4fa19f9f8390fa8aee77, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1730989058246 2024-11-07T14:17:42,540 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5a16e97064ea2ba83f416db90324fc7e/B of 5a16e97064ea2ba83f416db90324fc7e into 42ead432e7ea49b18507cf7810db8d42(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:17:42,540 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:42,540 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e., storeName=5a16e97064ea2ba83f416db90324fc7e/B, priority=12, startTime=1730989062456; duration=0sec 2024-11-07T14:17:42,540 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting ef05161a23db4647bde0c6be876f68e9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1730989058919 2024-11-07T14:17:42,540 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4d15a9d101444bffa9e535339b0a8c87, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1730989059562 2024-11-07T14:17:42,540 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:42,540 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a16e97064ea2ba83f416db90324fc7e:B 2024-11-07T14:17:42,541 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2c7eacd8ce8f4fd9a1a75701e0a89a4b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1730989060727 2024-11-07T14:17:42,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741889_1065 (size=14541) 
2024-11-07T14:17:42,555 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/53b1d013bf1a48508c5aa4dc53717c6b 2024-11-07T14:17:42,570 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a16e97064ea2ba83f416db90324fc7e#C#compaction#51 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:17:42,570 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/6884a53971644e6da4b4442b3264c50c is 50, key is test_row_0/C:col10/1730989060727/Put/seqid=0 2024-11-07T14:17:42,573 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/67100ea7d624484ead92ae661508a97a is 50, key is test_row_0/B:col10/1730989062517/Put/seqid=0 2024-11-07T14:17:42,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741890_1066 (size=12697) 2024-11-07T14:17:42,583 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:42,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989122579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:42,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:42,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989122583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:42,587 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:42,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989122584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:42,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741891_1067 (size=12151) 2024-11-07T14:17:42,595 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/67100ea7d624484ead92ae661508a97a 2024-11-07T14:17:42,610 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/640e60cc3f9f41538332554f8887bc14 is 50, key is test_row_0/C:col10/1730989062517/Put/seqid=0 2024-11-07T14:17:42,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741892_1068 (size=12151) 2024-11-07T14:17:42,639 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/640e60cc3f9f41538332554f8887bc14 2024-11-07T14:17:42,652 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/53b1d013bf1a48508c5aa4dc53717c6b as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/53b1d013bf1a48508c5aa4dc53717c6b 2024-11-07T14:17:42,662 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/53b1d013bf1a48508c5aa4dc53717c6b, 
entries=200, sequenceid=252, filesize=14.2 K 2024-11-07T14:17:42,665 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/67100ea7d624484ead92ae661508a97a as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/67100ea7d624484ead92ae661508a97a 2024-11-07T14:17:42,673 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/67100ea7d624484ead92ae661508a97a, entries=150, sequenceid=252, filesize=11.9 K 2024-11-07T14:17:42,674 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/640e60cc3f9f41538332554f8887bc14 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/640e60cc3f9f41538332554f8887bc14 2024-11-07T14:17:42,680 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/640e60cc3f9f41538332554f8887bc14, entries=150, sequenceid=252, filesize=11.9 K 2024-11-07T14:17:42,682 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 5a16e97064ea2ba83f416db90324fc7e in 163ms, sequenceid=252, compaction requested=false 2024-11-07T14:17:42,682 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:42,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:42,690 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a16e97064ea2ba83f416db90324fc7e 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-07T14:17:42,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=A 2024-11-07T14:17:42,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:42,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=B 2024-11-07T14:17:42,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:42,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=C 2024-11-07T14:17:42,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:42,699 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/68d3a8e95167422e852b008d4b979c5a is 50, key is test_row_0/A:col10/1730989062582/Put/seqid=0 2024-11-07T14:17:42,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:42,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989122701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:42,708 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:42,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989122702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:42,708 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:42,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989122703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:42,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741893_1069 (size=17181) 2024-11-07T14:17:42,735 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=281 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/68d3a8e95167422e852b008d4b979c5a 2024-11-07T14:17:42,748 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/ef6916e1510b4ed9821569b2ae4ee84c is 50, key is test_row_0/B:col10/1730989062582/Put/seqid=0 2024-11-07T14:17:42,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741894_1070 (size=12301) 2024-11-07T14:17:42,761 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=281 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/ef6916e1510b4ed9821569b2ae4ee84c 2024-11-07T14:17:42,776 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/248502cd209746a3ba90733718e24d38 is 50, key is test_row_0/C:col10/1730989062582/Put/seqid=0 2024-11-07T14:17:42,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741895_1071 (size=12301) 2024-11-07T14:17:42,801 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=281 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/248502cd209746a3ba90733718e24d38 2024-11-07T14:17:42,807 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:42,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989122806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:42,808 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/68d3a8e95167422e852b008d4b979c5a as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/68d3a8e95167422e852b008d4b979c5a 2024-11-07T14:17:42,813 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:42,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989122810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:42,813 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:42,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989122810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:42,818 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/68d3a8e95167422e852b008d4b979c5a, entries=250, sequenceid=281, filesize=16.8 K 2024-11-07T14:17:42,821 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/ef6916e1510b4ed9821569b2ae4ee84c as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/ef6916e1510b4ed9821569b2ae4ee84c 2024-11-07T14:17:42,827 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/ef6916e1510b4ed9821569b2ae4ee84c, entries=150, sequenceid=281, filesize=12.0 K 2024-11-07T14:17:42,829 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/248502cd209746a3ba90733718e24d38 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/248502cd209746a3ba90733718e24d38 2024-11-07T14:17:42,836 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/248502cd209746a3ba90733718e24d38, entries=150, sequenceid=281, filesize=12.0 K 2024-11-07T14:17:42,837 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 5a16e97064ea2ba83f416db90324fc7e in 147ms, sequenceid=281, compaction requested=true 2024-11-07T14:17:42,837 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:42,837 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:17:42,838 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a16e97064ea2ba83f416db90324fc7e:A, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:17:42,838 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:42,838 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a16e97064ea2ba83f416db90324fc7e:B, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:17:42,838 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:17:42,840 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 44419 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:17:42,840 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 5a16e97064ea2ba83f416db90324fc7e/A is initiating minor compaction (all files) 2024-11-07T14:17:42,840 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a16e97064ea2ba83f416db90324fc7e/A in TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:42,840 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/2dff8070276e4a1cb9b02044dd54b3d3, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/53b1d013bf1a48508c5aa4dc53717c6b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/68d3a8e95167422e852b008d4b979c5a] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp, totalSize=43.4 K 2024-11-07T14:17:42,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a16e97064ea2ba83f416db90324fc7e:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:17:42,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-07T14:17:42,841 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 2dff8070276e4a1cb9b02044dd54b3d3, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1730989060727 2024-11-07T14:17:42,841 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 53b1d013bf1a48508c5aa4dc53717c6b, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1730989061889 2024-11-07T14:17:42,842 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 68d3a8e95167422e852b008d4b979c5a, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, 
seqNum=281, earliestPutTs=1730989062560 2024-11-07T14:17:42,854 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a16e97064ea2ba83f416db90324fc7e#A#compaction#57 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:17:42,855 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/0e2d862bc983479e8ed33f55e9fc29b5 is 50, key is test_row_0/A:col10/1730989062582/Put/seqid=0 2024-11-07T14:17:42,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741896_1072 (size=12949) 2024-11-07T14:17:42,872 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/0e2d862bc983479e8ed33f55e9fc29b5 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/0e2d862bc983479e8ed33f55e9fc29b5 2024-11-07T14:17:42,880 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a16e97064ea2ba83f416db90324fc7e/A of 5a16e97064ea2ba83f416db90324fc7e into 0e2d862bc983479e8ed33f55e9fc29b5(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:17:42,880 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:42,880 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e., storeName=5a16e97064ea2ba83f416db90324fc7e/A, priority=13, startTime=1730989062837; duration=0sec 2024-11-07T14:17:42,880 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-07T14:17:42,880 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a16e97064ea2ba83f416db90324fc7e:A 2024-11-07T14:17:42,880 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 4 compacting, 2 eligible, 16 blocking 2024-11-07T14:17:42,881 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-07T14:17:42,881 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-07T14:17:42,881 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
because compaction request was cancelled 2024-11-07T14:17:42,881 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a16e97064ea2ba83f416db90324fc7e:C 2024-11-07T14:17:42,881 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:17:42,883 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:17:42,883 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 5a16e97064ea2ba83f416db90324fc7e/B is initiating minor compaction (all files) 2024-11-07T14:17:42,883 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a16e97064ea2ba83f416db90324fc7e/B in TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:42,883 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/42ead432e7ea49b18507cf7810db8d42, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/67100ea7d624484ead92ae661508a97a, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/ef6916e1510b4ed9821569b2ae4ee84c] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp, totalSize=36.3 K 2024-11-07T14:17:42,883 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 42ead432e7ea49b18507cf7810db8d42, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1730989060727 2024-11-07T14:17:42,884 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 67100ea7d624484ead92ae661508a97a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1730989061889 2024-11-07T14:17:42,884 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting ef6916e1510b4ed9821569b2ae4ee84c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=281, earliestPutTs=1730989062582 2024-11-07T14:17:42,916 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a16e97064ea2ba83f416db90324fc7e#B#compaction#58 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:17:42,917 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/4e0ad9b8b90b41cfba8c3776fc2ed0ae is 50, key is test_row_0/B:col10/1730989062582/Put/seqid=0 2024-11-07T14:17:42,927 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a16e97064ea2ba83f416db90324fc7e 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-07T14:17:42,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=A 2024-11-07T14:17:42,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:42,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=B 2024-11-07T14:17:42,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:42,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:42,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=C 2024-11-07T14:17:42,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:42,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741897_1073 (size=12949) 2024-11-07T14:17:42,952 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/4e0ad9b8b90b41cfba8c3776fc2ed0ae as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/4e0ad9b8b90b41cfba8c3776fc2ed0ae 2024-11-07T14:17:42,956 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/60b43c8945224fc6bc0abe36165f0895 is 50, key is test_row_0/A:col10/1730989062922/Put/seqid=0 2024-11-07T14:17:42,964 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a16e97064ea2ba83f416db90324fc7e/B of 5a16e97064ea2ba83f416db90324fc7e into 4e0ad9b8b90b41cfba8c3776fc2ed0ae(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
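Note on the throttle.PressureAwareThroughputController lines above: the controller measures how fast each compaction is writing ("average throughput is 3.28 MB/second") against the current ceiling ("total limit is 50.00 MB/second") and puts the writer to sleep when it gets ahead of that rate, which is why the entries also report how often and how long it slept. The following is only a rough, self-contained sketch of that pacing idea with made-up names, not the real controller:

```java
/** Illustrative pacing loop in the spirit of PressureAwareThroughputController:
 *  track bytes written since the operation started and sleep whenever the observed
 *  rate would exceed the configured limit. All names here are made up. */
final class ThroughputLimiterSketch {
  private final double maxBytesPerSecond;
  private final long startNanos = System.nanoTime();
  private long bytesWritten;
  private long sleeps;

  ThroughputLimiterSketch(double maxBytesPerSecond) {
    this.maxBytesPerSecond = maxBytesPerSecond;
  }

  /** Called after each chunk is written; sleeps if we are ahead of the allowed rate. */
  void control(long deltaBytes) throws InterruptedException {
    bytesWritten += deltaBytes;
    double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
    double earliestAllowedSec = bytesWritten / maxBytesPerSecond;
    long sleepMs = (long) ((earliestAllowedSec - elapsedSec) * 1000);
    if (sleepMs > 0) {
      sleeps++;
      Thread.sleep(sleepMs);
    }
  }

  String summary() {
    double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
    return String.format("average throughput is %.2f MB/second, slept %d time(s)",
        bytesWritten / elapsedSec / (1024 * 1024), sleeps);
  }

  public static void main(String[] args) throws InterruptedException {
    ThroughputLimiterSketch limiter = new ThroughputLimiterSketch(50 * 1024 * 1024);
    for (int i = 0; i < 16; i++) {
      limiter.control(64 * 1024); // pretend a 64 KB block was just written
    }
    System.out.println(limiter.summary());
  }
}
```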
2024-11-07T14:17:42,964 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:42,964 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e., storeName=5a16e97064ea2ba83f416db90324fc7e/B, priority=13, startTime=1730989062838; duration=0sec 2024-11-07T14:17:42,964 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:42,964 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a16e97064ea2ba83f416db90324fc7e:B 2024-11-07T14:17:42,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741898_1074 (size=12301) 2024-11-07T14:17:42,981 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/60b43c8945224fc6bc0abe36165f0895 2024-11-07T14:17:42,990 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:42,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989122989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:42,991 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/6884a53971644e6da4b4442b3264c50c as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/6884a53971644e6da4b4442b3264c50c 2024-11-07T14:17:42,993 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:42,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989122991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:42,998 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/ac897d0a5dbb4c878ae012056379ab35 is 50, key is test_row_0/B:col10/1730989062922/Put/seqid=0 2024-11-07T14:17:43,003 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5a16e97064ea2ba83f416db90324fc7e/C of 5a16e97064ea2ba83f416db90324fc7e into 6884a53971644e6da4b4442b3264c50c(size=12.4 K), total size for store is 36.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:17:43,003 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:43,003 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e., storeName=5a16e97064ea2ba83f416db90324fc7e/C, priority=12, startTime=1730989062460; duration=0sec 2024-11-07T14:17:43,003 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:43,003 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a16e97064ea2ba83f416db90324fc7e:C 2024-11-07T14:17:43,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741899_1075 (size=12301) 2024-11-07T14:17:43,008 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/ac897d0a5dbb4c878ae012056379ab35 2024-11-07T14:17:43,012 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:43,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989123009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:43,018 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:43,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989123015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:43,018 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:43,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989123016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:43,024 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/a6fa5f568a7b426c87e71e72841b4b8b is 50, key is test_row_0/C:col10/1730989062922/Put/seqid=0 2024-11-07T14:17:43,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741900_1076 (size=12301) 2024-11-07T14:17:43,038 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/a6fa5f568a7b426c87e71e72841b4b8b 2024-11-07T14:17:43,045 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/60b43c8945224fc6bc0abe36165f0895 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/60b43c8945224fc6bc0abe36165f0895 2024-11-07T14:17:43,052 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/60b43c8945224fc6bc0abe36165f0895, entries=150, sequenceid=294, filesize=12.0 K 2024-11-07T14:17:43,052 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-11-07T14:17:43,054 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/ac897d0a5dbb4c878ae012056379ab35 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/ac897d0a5dbb4c878ae012056379ab35 2024-11-07T14:17:43,064 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/ac897d0a5dbb4c878ae012056379ab35, entries=150, sequenceid=294, filesize=12.0 K 2024-11-07T14:17:43,066 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/a6fa5f568a7b426c87e71e72841b4b8b as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/a6fa5f568a7b426c87e71e72841b4b8b 2024-11-07T14:17:43,073 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/a6fa5f568a7b426c87e71e72841b4b8b, entries=150, sequenceid=294, filesize=12.0 K 2024-11-07T14:17:43,075 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 5a16e97064ea2ba83f416db90324fc7e in 147ms, sequenceid=294, compaction requested=true 2024-11-07T14:17:43,075 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:43,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a16e97064ea2ba83f416db90324fc7e:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:17:43,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:43,075 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-07T14:17:43,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a16e97064ea2ba83f416db90324fc7e:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:17:43,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:17:43,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a16e97064ea2ba83f416db90324fc7e:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:17:43,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 
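The repeated RegionTooBusyException entries are back-pressure from HRegion.checkResources: once a region's in-memory data exceeds its blocking limit (in stock HBase, the configured memstore flush size multiplied by hbase.hregion.memstore.block.multiplier; the test evidently runs with a small flush size, hence "Over memstore limit=512.0 K"), new mutations are rejected until flushes like the one just logged catch up. A minimal sketch of that gate, using a hypothetical RegionState holder rather than the real HRegion internals:

```java
import java.util.concurrent.atomic.AtomicLong;

/** Illustrative back-pressure gate in the spirit of HRegion.checkResources.
 *  RegionState and its fields are hypothetical; the idea mirrors the log: writes
 *  are refused while the memstore exceeds flushSize * blockMultiplier. */
final class MemStoreGateSketch {

  static final class RegionState {
    final long flushSizeBytes;   // cf. hbase.hregion.memstore.flush.size
    final int blockMultiplier;   // cf. hbase.hregion.memstore.block.multiplier
    final AtomicLong memStoreBytes = new AtomicLong();

    RegionState(long flushSizeBytes, int blockMultiplier) {
      this.flushSizeBytes = flushSizeBytes;
      this.blockMultiplier = blockMultiplier;
    }
  }

  static void checkResources(RegionState region) {
    long limit = region.flushSizeBytes * region.blockMultiplier;
    if (region.memStoreBytes.get() > limit) {
      // The real code throws org.apache.hadoop.hbase.RegionTooBusyException here and
      // also asks for a flush; callers treat it as a retryable condition.
      throw new IllegalStateException("Over memstore limit=" + (limit / 1024) + " K");
    }
  }

  public static void main(String[] args) {
    RegionState region = new RegionState(128 * 1024, 4); // 512 K blocking limit
    region.memStoreBytes.set(600 * 1024);                // pretend 600 K is buffered
    try {
      checkResources(region);
    } catch (IllegalStateException e) {
      System.out.println(e.getMessage());                // "Over memstore limit=512 K"
    }
  }
}
```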
2024-11-07T14:17:43,075 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:17:43,076 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-07T14:17:43,076 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-07T14:17:43,076 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. because compaction request was cancelled 2024-11-07T14:17:43,077 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a16e97064ea2ba83f416db90324fc7e:A 2024-11-07T14:17:43,077 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-07T14:17:43,079 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49450 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:17:43,079 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-07T14:17:43,079 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 5a16e97064ea2ba83f416db90324fc7e/C is initiating minor compaction (all files) 2024-11-07T14:17:43,079 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-07T14:17:43,079 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. because compaction request was cancelled 2024-11-07T14:17:43,079 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a16e97064ea2ba83f416db90324fc7e/C in TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
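The ExploringCompactionPolicy lines ("selected 4 files of size 49450 starting at candidate #0 after considering 3 permutations with 3 in ratio") come from enumerating candidate windows of store files and keeping only windows whose files are mutually "in ratio". As a rough illustrative model of that test, assuming the stock 1.2 default for hbase.hstore.compaction.ratio and not the actual policy code, a window passes when no single file is larger than the ratio times the combined size of the other files in the window:

```java
import java.util.List;

/** Simplified model of the "files in ratio" test used while exploring compaction
 *  candidates. Illustrative only: the real ExploringCompactionPolicy also weighs
 *  file counts, total size and overall store shape across permutations. */
final class CompactionRatioSketch {

  /** True when no file dwarfs the rest of the selection by more than the ratio. */
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    if (fileSizes.size() < 2) {
      return true;
    }
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false; // rewriting the small files would mostly just copy the big one
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Sizes in the ballpark of the ~12 K store files selected above; 1.2 mirrors the
    // stock default of hbase.hstore.compaction.ratio.
    List<Long> window = List.of(12_699L, 12_185L, 12_288L, 12_301L);
    System.out.println("in ratio: " + filesInRatio(window, 1.2));
  }
}
```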
2024-11-07T14:17:43,079 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a16e97064ea2ba83f416db90324fc7e:B 2024-11-07T14:17:43,079 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/6884a53971644e6da4b4442b3264c50c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/640e60cc3f9f41538332554f8887bc14, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/248502cd209746a3ba90733718e24d38, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/a6fa5f568a7b426c87e71e72841b4b8b] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp, totalSize=48.3 K 2024-11-07T14:17:43,080 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 6884a53971644e6da4b4442b3264c50c, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1730989060727 2024-11-07T14:17:43,080 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 640e60cc3f9f41538332554f8887bc14, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1730989061889 2024-11-07T14:17:43,081 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 248502cd209746a3ba90733718e24d38, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=281, earliestPutTs=1730989062582 2024-11-07T14:17:43,082 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting a6fa5f568a7b426c87e71e72841b4b8b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1730989062692 2024-11-07T14:17:43,096 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a16e97064ea2ba83f416db90324fc7e#C#compaction#62 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:17:43,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:43,097 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a16e97064ea2ba83f416db90324fc7e 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-07T14:17:43,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=A 2024-11-07T14:17:43,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:43,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=B 2024-11-07T14:17:43,097 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/310bee2df51b493c8b8df646a062464f is 50, key is test_row_0/C:col10/1730989062922/Put/seqid=0 2024-11-07T14:17:43,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:43,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=C 2024-11-07T14:17:43,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:43,118 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:43,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989123116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:43,119 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:43,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989123117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:43,119 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/eab722de001c4e29a38c165b58384521 is 50, key is test_row_0/A:col10/1730989062975/Put/seqid=0 2024-11-07T14:17:43,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741901_1077 (size=12983) 2024-11-07T14:17:43,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741902_1078 (size=14741) 2024-11-07T14:17:43,141 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/eab722de001c4e29a38c165b58384521 2024-11-07T14:17:43,153 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/d90dc46c2cb34381a8f6eb36a7c5c328 is 50, key is test_row_0/B:col10/1730989062975/Put/seqid=0 2024-11-07T14:17:43,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741903_1079 (size=12301) 2024-11-07T14:17:43,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:43,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989123220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:43,222 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:43,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989123221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:43,317 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:43,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989123315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:43,324 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:43,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989123321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:43,324 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:43,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989123322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:43,426 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:43,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989123425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:43,427 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:43,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989123425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:43,539 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/310bee2df51b493c8b8df646a062464f as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/310bee2df51b493c8b8df646a062464f 2024-11-07T14:17:43,550 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5a16e97064ea2ba83f416db90324fc7e/C of 5a16e97064ea2ba83f416db90324fc7e into 310bee2df51b493c8b8df646a062464f(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
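On the client side, the ipc.CallRunner entries above correspond to Mutate calls being answered with RegionTooBusyException. The HBase client retries such calls on its own, but an application can also back off explicitly; a minimal sketch against the public client API follows (table, family and row names are taken from the test, the retry policy is invented, and depending on client retry settings the busy signal may arrive wrapped in a RetriesExhaustedException rather than directly):

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

/** Sketch: write to the test table and back off when the region reports it is over
 *  its memstore limit. Retry counts and sleep times here are arbitrary choices. */
public final class BusyRegionWriter {

  /** Walks the cause chain to see whether the failure is a busy-region signal. */
  private static boolean busyRegion(Throwable t) {
    for (Throwable c = t; c != null; c = c.getCause()) {
      if (c instanceof RegionTooBusyException) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {

      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (IOException e) {
          // Only keep retrying if the failure is the region telling us it is too busy.
          if (!busyRegion(e) || attempt >= 5) {
            throw e;
          }
          Thread.sleep(backoffMs); // give flushes/compactions time to drain the memstore
          backoffMs *= 2;
        }
      }
    }
  }
}
```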
2024-11-07T14:17:43,550 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:43,550 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e., storeName=5a16e97064ea2ba83f416db90324fc7e/C, priority=12, startTime=1730989063075; duration=0sec 2024-11-07T14:17:43,550 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:43,550 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a16e97064ea2ba83f416db90324fc7e:C 2024-11-07T14:17:43,567 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/d90dc46c2cb34381a8f6eb36a7c5c328 2024-11-07T14:17:43,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-07T14:17:43,576 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-11-07T14:17:43,578 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:17:43,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-11-07T14:17:43,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-07T14:17:43,581 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/4fcc855f403743229527a9e9690cdde5 is 50, key is test_row_0/C:col10/1730989062975/Put/seqid=0 2024-11-07T14:17:43,582 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:17:43,583 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:17:43,583 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:17:43,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741904_1080 (size=12301) 2024-11-07T14:17:43,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 
{}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-07T14:17:43,730 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:43,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989123729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:43,731 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:43,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989123730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:43,735 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:43,735 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-07T14:17:43,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:43,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. as already flushing 2024-11-07T14:17:43,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:43,736 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:17:43,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:43,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:43,821 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:43,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989123821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:43,826 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:43,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989123825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:43,830 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:43,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989123829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:43,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-07T14:17:43,888 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:43,889 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-07T14:17:43,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:43,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. as already flushing 2024-11-07T14:17:43,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:43,889 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:17:43,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:43,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:43,996 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/4fcc855f403743229527a9e9690cdde5 2024-11-07T14:17:44,003 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/eab722de001c4e29a38c165b58384521 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/eab722de001c4e29a38c165b58384521 2024-11-07T14:17:44,010 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/eab722de001c4e29a38c165b58384521, entries=200, sequenceid=321, filesize=14.4 K 2024-11-07T14:17:44,012 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/d90dc46c2cb34381a8f6eb36a7c5c328 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/d90dc46c2cb34381a8f6eb36a7c5c328 2024-11-07T14:17:44,019 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/d90dc46c2cb34381a8f6eb36a7c5c328, entries=150, sequenceid=321, filesize=12.0 K 2024-11-07T14:17:44,020 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/4fcc855f403743229527a9e9690cdde5 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/4fcc855f403743229527a9e9690cdde5 2024-11-07T14:17:44,027 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/4fcc855f403743229527a9e9690cdde5, entries=150, sequenceid=321, filesize=12.0 K 2024-11-07T14:17:44,029 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 5a16e97064ea2ba83f416db90324fc7e in 932ms, sequenceid=321, compaction requested=true 2024-11-07T14:17:44,029 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:44,029 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 
3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:17:44,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a16e97064ea2ba83f416db90324fc7e:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:17:44,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:44,029 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:17:44,031 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39991 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:17:44,031 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): 5a16e97064ea2ba83f416db90324fc7e/A is initiating minor compaction (all files) 2024-11-07T14:17:44,031 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a16e97064ea2ba83f416db90324fc7e/A in TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:44,031 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/0e2d862bc983479e8ed33f55e9fc29b5, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/60b43c8945224fc6bc0abe36165f0895, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/eab722de001c4e29a38c165b58384521] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp, totalSize=39.1 K 2024-11-07T14:17:44,032 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:17:44,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a16e97064ea2ba83f416db90324fc7e:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:17:44,032 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 5a16e97064ea2ba83f416db90324fc7e/B is initiating minor compaction (all files) 2024-11-07T14:17:44,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:44,032 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a16e97064ea2ba83f416db90324fc7e/B in TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
2024-11-07T14:17:44,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a16e97064ea2ba83f416db90324fc7e:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:17:44,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:17:44,032 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/4e0ad9b8b90b41cfba8c3776fc2ed0ae, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/ac897d0a5dbb4c878ae012056379ab35, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/d90dc46c2cb34381a8f6eb36a7c5c328] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp, totalSize=36.7 K 2024-11-07T14:17:44,032 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0e2d862bc983479e8ed33f55e9fc29b5, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=281, earliestPutTs=1730989062582 2024-11-07T14:17:44,033 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 4e0ad9b8b90b41cfba8c3776fc2ed0ae, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=281, earliestPutTs=1730989062582 2024-11-07T14:17:44,033 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 60b43c8945224fc6bc0abe36165f0895, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1730989062692 2024-11-07T14:17:44,034 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting ac897d0a5dbb4c878ae012056379ab35, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1730989062692 2024-11-07T14:17:44,034 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting eab722de001c4e29a38c165b58384521, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1730989062975 2024-11-07T14:17:44,035 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting d90dc46c2cb34381a8f6eb36a7c5c328, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1730989062975 2024-11-07T14:17:44,043 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:44,044 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-07T14:17:44,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
2024-11-07T14:17:44,044 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing 5a16e97064ea2ba83f416db90324fc7e 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-07T14:17:44,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=A 2024-11-07T14:17:44,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:44,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=B 2024-11-07T14:17:44,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:44,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=C 2024-11-07T14:17:44,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:44,047 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a16e97064ea2ba83f416db90324fc7e#A#compaction#66 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:17:44,048 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a16e97064ea2ba83f416db90324fc7e#B#compaction#67 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:17:44,049 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/703dbf5502c34276842850f13bc1d162 is 50, key is test_row_0/A:col10/1730989062975/Put/seqid=0 2024-11-07T14:17:44,049 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/33466e9a1f114ade98453edb06f67dcc is 50, key is test_row_0/B:col10/1730989062975/Put/seqid=0 2024-11-07T14:17:44,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/b853de66d18f4339b4c5bb1d14b38314 is 50, key is test_row_0/A:col10/1730989063113/Put/seqid=0 2024-11-07T14:17:44,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741906_1082 (size=13051) 2024-11-07T14:17:44,081 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/33466e9a1f114ade98453edb06f67dcc as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/33466e9a1f114ade98453edb06f67dcc 2024-11-07T14:17:44,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741907_1083 (size=12301) 2024-11-07T14:17:44,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741905_1081 (size=13051) 2024-11-07T14:17:44,090 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/b853de66d18f4339b4c5bb1d14b38314 2024-11-07T14:17:44,096 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a16e97064ea2ba83f416db90324fc7e/B of 5a16e97064ea2ba83f416db90324fc7e into 33466e9a1f114ade98453edb06f67dcc(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:17:44,096 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:44,097 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e., storeName=5a16e97064ea2ba83f416db90324fc7e/B, priority=13, startTime=1730989064029; duration=0sec 2024-11-07T14:17:44,097 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:17:44,097 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a16e97064ea2ba83f416db90324fc7e:B 2024-11-07T14:17:44,097 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-07T14:17:44,098 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-07T14:17:44,099 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-07T14:17:44,099 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. because compaction request was cancelled 2024-11-07T14:17:44,099 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a16e97064ea2ba83f416db90324fc7e:C 2024-11-07T14:17:44,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/95202dea5d374a498a10beb23279e8a8 is 50, key is test_row_0/B:col10/1730989063113/Put/seqid=0 2024-11-07T14:17:44,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741908_1084 (size=12301) 2024-11-07T14:17:44,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-07T14:17:44,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:44,235 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. as already flushing 2024-11-07T14:17:44,278 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:44,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989124277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:44,279 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:44,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989124277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:44,381 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:44,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989124379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:44,381 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:44,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989124380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:44,508 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/95202dea5d374a498a10beb23279e8a8 2024-11-07T14:17:44,510 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/703dbf5502c34276842850f13bc1d162 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/703dbf5502c34276842850f13bc1d162 2024-11-07T14:17:44,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/e7ae01c4efc5408e8824f3d29c96e287 is 50, key is test_row_0/C:col10/1730989063113/Put/seqid=0 2024-11-07T14:17:44,518 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a16e97064ea2ba83f416db90324fc7e/A of 5a16e97064ea2ba83f416db90324fc7e into 703dbf5502c34276842850f13bc1d162(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:17:44,519 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:44,519 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e., storeName=5a16e97064ea2ba83f416db90324fc7e/A, priority=13, startTime=1730989064029; duration=0sec 2024-11-07T14:17:44,519 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:44,519 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a16e97064ea2ba83f416db90324fc7e:A 2024-11-07T14:17:44,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741909_1085 (size=12301) 2024-11-07T14:17:44,583 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:44,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989124583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:44,585 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:44,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989124583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:44,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-07T14:17:44,828 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:44,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989124827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:44,832 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:44,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989124831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:44,833 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:44,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989124832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:44,885 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:44,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989124885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:44,889 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:44,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989124888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:44,930 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/e7ae01c4efc5408e8824f3d29c96e287 2024-11-07T14:17:44,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/b853de66d18f4339b4c5bb1d14b38314 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/b853de66d18f4339b4c5bb1d14b38314 2024-11-07T14:17:44,950 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/b853de66d18f4339b4c5bb1d14b38314, entries=150, sequenceid=333, filesize=12.0 K 2024-11-07T14:17:44,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/95202dea5d374a498a10beb23279e8a8 as 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/95202dea5d374a498a10beb23279e8a8 2024-11-07T14:17:44,957 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/95202dea5d374a498a10beb23279e8a8, entries=150, sequenceid=333, filesize=12.0 K 2024-11-07T14:17:44,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/e7ae01c4efc5408e8824f3d29c96e287 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/e7ae01c4efc5408e8824f3d29c96e287 2024-11-07T14:17:44,967 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/e7ae01c4efc5408e8824f3d29c96e287, entries=150, sequenceid=333, filesize=12.0 K 2024-11-07T14:17:44,968 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 5a16e97064ea2ba83f416db90324fc7e in 924ms, sequenceid=333, compaction requested=true 2024-11-07T14:17:44,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:44,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
2024-11-07T14:17:44,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-11-07T14:17:44,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-11-07T14:17:44,973 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-11-07T14:17:44,973 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3870 sec 2024-11-07T14:17:44,975 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 1.3950 sec 2024-11-07T14:17:45,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:45,390 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a16e97064ea2ba83f416db90324fc7e 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-07T14:17:45,390 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=A 2024-11-07T14:17:45,390 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:45,390 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=B 2024-11-07T14:17:45,390 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:45,390 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=C 2024-11-07T14:17:45,390 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:45,396 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/b089d264503d4a2392bf3d89f831a678 is 50, key is test_row_0/A:col10/1730989065388/Put/seqid=0 2024-11-07T14:17:45,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741910_1086 (size=12301) 2024-11-07T14:17:45,403 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=361 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/b089d264503d4a2392bf3d89f831a678 2024-11-07T14:17:45,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:45,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989125403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:45,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:45,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989125404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:45,412 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/25dc626570d7499ab24fdbaf8f3999fb is 50, key is test_row_0/B:col10/1730989065388/Put/seqid=0 2024-11-07T14:17:45,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741911_1087 (size=12301) 2024-11-07T14:17:45,511 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:45,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989125510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:45,513 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:45,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989125513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:45,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-07T14:17:45,686 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-11-07T14:17:45,687 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:17:45,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-11-07T14:17:45,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-07T14:17:45,689 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:17:45,690 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:17:45,690 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:17:45,713 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:45,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989125713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:45,715 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:45,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989125715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:45,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-07T14:17:45,820 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=361 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/25dc626570d7499ab24fdbaf8f3999fb 2024-11-07T14:17:45,831 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/d88cfd67b4ce4e698f8e85733c7625bd is 50, key is test_row_0/C:col10/1730989065388/Put/seqid=0 2024-11-07T14:17:45,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741912_1088 (size=12301) 2024-11-07T14:17:45,842 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:45,843 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-07T14:17:45,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:45,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. as already flushing 2024-11-07T14:17:45,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
2024-11-07T14:17:45,843 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:45,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:45,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:45,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-07T14:17:45,996 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:45,997 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-07T14:17:45,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:45,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. as already flushing 2024-11-07T14:17:45,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:45,997 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:45,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:45,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:46,019 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:46,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989126017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:46,020 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:46,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989126019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:46,150 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:46,150 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-07T14:17:46,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
2024-11-07T14:17:46,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. as already flushing 2024-11-07T14:17:46,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:46,151 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:46,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:17:46,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:17:46,238 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=361 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/d88cfd67b4ce4e698f8e85733c7625bd 2024-11-07T14:17:46,244 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/b089d264503d4a2392bf3d89f831a678 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/b089d264503d4a2392bf3d89f831a678 2024-11-07T14:17:46,251 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/b089d264503d4a2392bf3d89f831a678, entries=150, sequenceid=361, filesize=12.0 K 2024-11-07T14:17:46,252 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/25dc626570d7499ab24fdbaf8f3999fb as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/25dc626570d7499ab24fdbaf8f3999fb 2024-11-07T14:17:46,258 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/25dc626570d7499ab24fdbaf8f3999fb, entries=150, sequenceid=361, filesize=12.0 K 2024-11-07T14:17:46,261 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/d88cfd67b4ce4e698f8e85733c7625bd as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/d88cfd67b4ce4e698f8e85733c7625bd 2024-11-07T14:17:46,268 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/d88cfd67b4ce4e698f8e85733c7625bd, entries=150, sequenceid=361, filesize=12.0 K 2024-11-07T14:17:46,269 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 5a16e97064ea2ba83f416db90324fc7e in 879ms, sequenceid=361, compaction requested=true 2024-11-07T14:17:46,270 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:46,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a16e97064ea2ba83f416db90324fc7e:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:17:46,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:46,270 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:17:46,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a16e97064ea2ba83f416db90324fc7e:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:17:46,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:46,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a16e97064ea2ba83f416db90324fc7e:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:17:46,270 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:17:46,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:17:46,271 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:17:46,271 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:17:46,271 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): 5a16e97064ea2ba83f416db90324fc7e/A is initiating minor compaction (all files) 2024-11-07T14:17:46,271 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 5a16e97064ea2ba83f416db90324fc7e/B is initiating minor compaction (all files) 2024-11-07T14:17:46,272 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a16e97064ea2ba83f416db90324fc7e/A in TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:46,272 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a16e97064ea2ba83f416db90324fc7e/B in TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
2024-11-07T14:17:46,272 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/703dbf5502c34276842850f13bc1d162, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/b853de66d18f4339b4c5bb1d14b38314, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/b089d264503d4a2392bf3d89f831a678] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp, totalSize=36.8 K 2024-11-07T14:17:46,272 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/33466e9a1f114ade98453edb06f67dcc, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/95202dea5d374a498a10beb23279e8a8, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/25dc626570d7499ab24fdbaf8f3999fb] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp, totalSize=36.8 K 2024-11-07T14:17:46,272 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 703dbf5502c34276842850f13bc1d162, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1730989062975 2024-11-07T14:17:46,272 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 33466e9a1f114ade98453edb06f67dcc, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1730989062975 2024-11-07T14:17:46,273 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting b853de66d18f4339b4c5bb1d14b38314, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1730989063103 2024-11-07T14:17:46,273 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 95202dea5d374a498a10beb23279e8a8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1730989063103 2024-11-07T14:17:46,274 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting b089d264503d4a2392bf3d89f831a678, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=361, earliestPutTs=1730989064272 2024-11-07T14:17:46,274 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 25dc626570d7499ab24fdbaf8f3999fb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=361, earliestPutTs=1730989064272 2024-11-07T14:17:46,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-07T14:17:46,298 INFO 
[RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a16e97064ea2ba83f416db90324fc7e#A#compaction#74 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:17:46,299 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a16e97064ea2ba83f416db90324fc7e#B#compaction#75 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:17:46,299 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/19754ba997914d4281c5e76bc8595eca is 50, key is test_row_0/B:col10/1730989065388/Put/seqid=0 2024-11-07T14:17:46,299 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/ee1d2d6b15c34bf1af8e2602eb1b55d8 is 50, key is test_row_0/A:col10/1730989065388/Put/seqid=0 2024-11-07T14:17:46,303 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:46,303 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-07T14:17:46,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
2024-11-07T14:17:46,304 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing 5a16e97064ea2ba83f416db90324fc7e 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-07T14:17:46,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=A 2024-11-07T14:17:46,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:46,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=B 2024-11-07T14:17:46,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:46,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=C 2024-11-07T14:17:46,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:46,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741914_1090 (size=13153) 2024-11-07T14:17:46,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/9758aca211c44ee784de13dc72925586 is 50, key is test_row_0/A:col10/1730989065395/Put/seqid=0 2024-11-07T14:17:46,319 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/ee1d2d6b15c34bf1af8e2602eb1b55d8 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/ee1d2d6b15c34bf1af8e2602eb1b55d8 2024-11-07T14:17:46,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741913_1089 (size=13153) 2024-11-07T14:17:46,328 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a16e97064ea2ba83f416db90324fc7e/A of 5a16e97064ea2ba83f416db90324fc7e into ee1d2d6b15c34bf1af8e2602eb1b55d8(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
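The flush starting above (pid=25, a per-region step of the table-level flush procedure pid=24 for TestAcidGuarantees that completes later in this log) is the kind of flush a client can also request through the HBase Admin API. A minimal sketch, not taken from the test itself; the cluster connection settings are assumed to come from hbase-site.xml on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        // Reads hbase-site.xml from the classpath; quorum/port are whatever the test cluster uses.
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Asks for a flush of every region of the table; in this build the master
            // drives it with a table/region flush procedure like pid=24/pid=25 above.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}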
2024-11-07T14:17:46,328 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:46,328 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e., storeName=5a16e97064ea2ba83f416db90324fc7e/A, priority=13, startTime=1730989066270; duration=0sec 2024-11-07T14:17:46,328 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:17:46,328 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a16e97064ea2ba83f416db90324fc7e:A 2024-11-07T14:17:46,329 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:17:46,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741915_1091 (size=12301) 2024-11-07T14:17:46,333 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:17:46,333 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): 5a16e97064ea2ba83f416db90324fc7e/C is initiating minor compaction (all files) 2024-11-07T14:17:46,334 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a16e97064ea2ba83f416db90324fc7e/C in TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
2024-11-07T14:17:46,334 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/310bee2df51b493c8b8df646a062464f, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/4fcc855f403743229527a9e9690cdde5, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/e7ae01c4efc5408e8824f3d29c96e287, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/d88cfd67b4ce4e698f8e85733c7625bd] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp, totalSize=48.7 K 2024-11-07T14:17:46,334 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/19754ba997914d4281c5e76bc8595eca as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/19754ba997914d4281c5e76bc8595eca 2024-11-07T14:17:46,335 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/9758aca211c44ee784de13dc72925586 2024-11-07T14:17:46,335 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 310bee2df51b493c8b8df646a062464f, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1730989062692 2024-11-07T14:17:46,336 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4fcc855f403743229527a9e9690cdde5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1730989062975 2024-11-07T14:17:46,337 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting e7ae01c4efc5408e8824f3d29c96e287, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1730989063103 2024-11-07T14:17:46,339 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting d88cfd67b4ce4e698f8e85733c7625bd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=361, earliestPutTs=1730989064272 2024-11-07T14:17:46,347 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a16e97064ea2ba83f416db90324fc7e/B of 5a16e97064ea2ba83f416db90324fc7e into 19754ba997914d4281c5e76bc8595eca(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:17:46,347 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:46,347 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e., storeName=5a16e97064ea2ba83f416db90324fc7e/B, priority=13, startTime=1730989066270; duration=0sec 2024-11-07T14:17:46,347 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:46,347 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a16e97064ea2ba83f416db90324fc7e:B 2024-11-07T14:17:46,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/5983bbf02e5a49a0b9a7c8c86cacaaee is 50, key is test_row_0/B:col10/1730989065395/Put/seqid=0 2024-11-07T14:17:46,361 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a16e97064ea2ba83f416db90324fc7e#C#compaction#78 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:17:46,362 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/9d23ce0c89864264805d48608d3583d7 is 50, key is test_row_0/C:col10/1730989065388/Put/seqid=0 2024-11-07T14:17:46,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741916_1092 (size=12301) 2024-11-07T14:17:46,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741917_1093 (size=13119) 2024-11-07T14:17:46,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:46,524 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. as already flushing 2024-11-07T14:17:46,547 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:46,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989126547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:46,548 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:46,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989126547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:46,650 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:46,650 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:46,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989126649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:46,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989126649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:46,766 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/5983bbf02e5a49a0b9a7c8c86cacaaee 2024-11-07T14:17:46,776 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/9d23ce0c89864264805d48608d3583d7 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/9d23ce0c89864264805d48608d3583d7 2024-11-07T14:17:46,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/30c2209ead2e41edb53358ac240169c1 is 50, key is test_row_0/C:col10/1730989065395/Put/seqid=0 2024-11-07T14:17:46,783 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5a16e97064ea2ba83f416db90324fc7e/C of 5a16e97064ea2ba83f416db90324fc7e into 9d23ce0c89864264805d48608d3583d7(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:17:46,783 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:46,783 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e., storeName=5a16e97064ea2ba83f416db90324fc7e/C, priority=12, startTime=1730989066270; duration=0sec 2024-11-07T14:17:46,784 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:46,784 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a16e97064ea2ba83f416db90324fc7e:C 2024-11-07T14:17:46,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741918_1094 (size=12301) 2024-11-07T14:17:46,788 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/30c2209ead2e41edb53358ac240169c1 2024-11-07T14:17:46,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-07T14:17:46,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/9758aca211c44ee784de13dc72925586 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/9758aca211c44ee784de13dc72925586 2024-11-07T14:17:46,806 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/9758aca211c44ee784de13dc72925586, entries=150, sequenceid=371, filesize=12.0 K 2024-11-07T14:17:46,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/5983bbf02e5a49a0b9a7c8c86cacaaee as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/5983bbf02e5a49a0b9a7c8c86cacaaee 2024-11-07T14:17:46,815 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/5983bbf02e5a49a0b9a7c8c86cacaaee, entries=150, sequenceid=371, filesize=12.0 K 
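The repeated RegionTooBusyException warnings above and below mean the region's memstore has grown past its blocking limit (512.0 K in this test's configuration), so HRegion.checkResources rejects new mutations until the in-flight flush drains memory. The stock client normally absorbs this with its own retries (hbase.client.retries.number, hbase.client.pause); the sketch below is a rough, hypothetical illustration of backing off manually, assuming the exception reaches the caller directly (e.g. with client retries disabled). Row, family, and qualifier names follow the test rows visible in this log; delays and attempt counts are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (RegionTooBusyException e) {
                    // Memstore is over its blocking limit; wait for the flush to catch up, then retry.
                    if (attempt >= 10) {
                        throw e;
                    }
                    Thread.sleep(backoffMs);
                    backoffMs = Math.min(backoffMs * 2, 5_000);
                }
            }
        }
    }
}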
2024-11-07T14:17:46,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/30c2209ead2e41edb53358ac240169c1 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/30c2209ead2e41edb53358ac240169c1 2024-11-07T14:17:46,822 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/30c2209ead2e41edb53358ac240169c1, entries=150, sequenceid=371, filesize=12.0 K 2024-11-07T14:17:46,825 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 5a16e97064ea2ba83f416db90324fc7e in 520ms, sequenceid=371, compaction requested=false 2024-11-07T14:17:46,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:46,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:46,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-11-07T14:17:46,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-11-07T14:17:46,828 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-11-07T14:17:46,828 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1360 sec 2024-11-07T14:17:46,830 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 1.1420 sec 2024-11-07T14:17:46,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:46,839 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a16e97064ea2ba83f416db90324fc7e 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-07T14:17:46,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=A 2024-11-07T14:17:46,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:46,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=B 2024-11-07T14:17:46,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline 
suffix; before=1, new segment=null 2024-11-07T14:17:46,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=C 2024-11-07T14:17:46,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:46,847 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/dd03925e48b7474fba37f178eba35ca8 is 50, key is test_row_0/A:col10/1730989066542/Put/seqid=0 2024-11-07T14:17:46,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741919_1095 (size=12301) 2024-11-07T14:17:46,854 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=401 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/dd03925e48b7474fba37f178eba35ca8 2024-11-07T14:17:46,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:46,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989126851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:46,855 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:46,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989126851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:46,856 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:46,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989126851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:46,856 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:46,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989126852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:46,863 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/9ac5fffd1d5b494daca19a0f7639ba33 is 50, key is test_row_0/B:col10/1730989066542/Put/seqid=0 2024-11-07T14:17:46,867 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:46,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989126866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:46,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741920_1096 (size=12301) 2024-11-07T14:17:46,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:46,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989126956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:46,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:46,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989126957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:46,970 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:46,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989126969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:47,158 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:47,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989127157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:47,158 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:47,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989127158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:47,159 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:47,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989127158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:47,160 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:47,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989127159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:47,173 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:47,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989127172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:47,271 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=401 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/9ac5fffd1d5b494daca19a0f7639ba33 2024-11-07T14:17:47,281 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/475c9a1c2a6047e791e4857ed4679ffe is 50, key is test_row_0/C:col10/1730989066542/Put/seqid=0 2024-11-07T14:17:47,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741921_1097 (size=12301) 2024-11-07T14:17:47,465 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:47,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989127462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:47,465 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:47,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989127463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:47,478 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:47,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989127475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:47,664 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:47,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989127663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:47,665 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:47,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989127664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:47,691 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=401 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/475c9a1c2a6047e791e4857ed4679ffe 2024-11-07T14:17:47,698 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/dd03925e48b7474fba37f178eba35ca8 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/dd03925e48b7474fba37f178eba35ca8 2024-11-07T14:17:47,703 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/dd03925e48b7474fba37f178eba35ca8, entries=150, sequenceid=401, filesize=12.0 K 2024-11-07T14:17:47,704 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/9ac5fffd1d5b494daca19a0f7639ba33 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/9ac5fffd1d5b494daca19a0f7639ba33 2024-11-07T14:17:47,709 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/9ac5fffd1d5b494daca19a0f7639ba33, entries=150, sequenceid=401, filesize=12.0 K 2024-11-07T14:17:47,710 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/475c9a1c2a6047e791e4857ed4679ffe as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/475c9a1c2a6047e791e4857ed4679ffe 2024-11-07T14:17:47,717 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/475c9a1c2a6047e791e4857ed4679ffe, entries=150, sequenceid=401, filesize=12.0 K 2024-11-07T14:17:47,718 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 5a16e97064ea2ba83f416db90324fc7e in 879ms, sequenceid=401, compaction requested=true 2024-11-07T14:17:47,718 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:47,718 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a16e97064ea2ba83f416db90324fc7e:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:17:47,718 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:47,718 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:17:47,718 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:17:47,718 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a16e97064ea2ba83f416db90324fc7e:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:17:47,719 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:47,719 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:17:47,720 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 5a16e97064ea2ba83f416db90324fc7e/B is initiating minor compaction (all files) 2024-11-07T14:17:47,720 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a16e97064ea2ba83f416db90324fc7e/B in TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
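The RegionTooBusyException warnings above record the regionserver rejecting Mutate calls while the region's memstore is over its 512.0 K blocking limit, and the surrounding flush entries show that memstore being drained to new HFiles. The HBase client normally retries this condition internally; purely as an illustration (connection setup, value, and retry counts are hypothetical, with the table, row, and column names taken from the log), an explicit manual retry with backoff could look like this sketch:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryOnBusyRegion {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Retry a few times if the region reports it is over its memstore blocking limit.
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) throw e;      // give up after a handful of attempts
          Thread.sleep(200L * attempt);   // simple linear backoff while the flush catches up
        }
      }
    }
  }
}
```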
2024-11-07T14:17:47,720 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:17:47,720 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/19754ba997914d4281c5e76bc8595eca, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/5983bbf02e5a49a0b9a7c8c86cacaaee, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/9ac5fffd1d5b494daca19a0f7639ba33] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp, totalSize=36.9 K 2024-11-07T14:17:47,720 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): 5a16e97064ea2ba83f416db90324fc7e/A is initiating minor compaction (all files) 2024-11-07T14:17:47,720 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a16e97064ea2ba83f416db90324fc7e/A in TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:47,720 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/ee1d2d6b15c34bf1af8e2602eb1b55d8, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/9758aca211c44ee784de13dc72925586, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/dd03925e48b7474fba37f178eba35ca8] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp, totalSize=36.9 K 2024-11-07T14:17:47,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a16e97064ea2ba83f416db90324fc7e:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:17:47,721 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:17:47,721 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 19754ba997914d4281c5e76bc8595eca, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=361, earliestPutTs=1730989064272 2024-11-07T14:17:47,721 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee1d2d6b15c34bf1af8e2602eb1b55d8, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=361, earliestPutTs=1730989064272 2024-11-07T14:17:47,721 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 5983bbf02e5a49a0b9a7c8c86cacaaee, keycount=150, bloomtype=ROW, size=12.0 
K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1730989065395 2024-11-07T14:17:47,721 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9758aca211c44ee784de13dc72925586, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1730989065395 2024-11-07T14:17:47,722 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd03925e48b7474fba37f178eba35ca8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=401, earliestPutTs=1730989066542 2024-11-07T14:17:47,722 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 9ac5fffd1d5b494daca19a0f7639ba33, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=401, earliestPutTs=1730989066542 2024-11-07T14:17:47,734 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a16e97064ea2ba83f416db90324fc7e#B#compaction#83 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:17:47,734 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a16e97064ea2ba83f416db90324fc7e#A#compaction#84 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:17:47,734 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/c981ec2fde084132a7a9567c61cde1df is 50, key is test_row_0/B:col10/1730989066542/Put/seqid=0 2024-11-07T14:17:47,735 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/2c875577d6df47ac80bfcf0d36293106 is 50, key is test_row_0/A:col10/1730989066542/Put/seqid=0 2024-11-07T14:17:47,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741922_1098 (size=13255) 2024-11-07T14:17:47,749 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/2c875577d6df47ac80bfcf0d36293106 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/2c875577d6df47ac80bfcf0d36293106 2024-11-07T14:17:47,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741923_1099 (size=13255) 2024-11-07T14:17:47,758 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a16e97064ea2ba83f416db90324fc7e/A of 5a16e97064ea2ba83f416db90324fc7e into 2c875577d6df47ac80bfcf0d36293106(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
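The entries above show the exploring compaction policy selecting three store files per family and the pressure-aware throughput controller capping the rewrite at 50.00 MB/second, alongside the earlier "16 blocking" and 512.0 K memstore figures. The exact configuration this test uses is not visible in the log; the sketch below only lists the standard keys that govern that flush, write-blocking, and compaction-selection behaviour, with illustrative values chosen (as an assumption) to reproduce the numbers seen here:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfig {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Flush each memstore at 128 KB (the production default is 128 MB) -- assumed value.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // Block new writes once a memstore exceeds flush.size * multiplier
    // (128 KB * 4 = 512 K would match the "Over memstore limit=512.0 K" warnings).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    // Make three eligible store files enough to trigger a minor compaction,
    // and block flushes once a store accumulates 16 files ("16 blocking" above).
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    return conf;
  }
}
```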
2024-11-07T14:17:47,758 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:47,758 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e., storeName=5a16e97064ea2ba83f416db90324fc7e/A, priority=13, startTime=1730989067718; duration=0sec 2024-11-07T14:17:47,758 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:17:47,758 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a16e97064ea2ba83f416db90324fc7e:A 2024-11-07T14:17:47,758 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:17:47,760 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:17:47,760 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): 5a16e97064ea2ba83f416db90324fc7e/C is initiating minor compaction (all files) 2024-11-07T14:17:47,760 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a16e97064ea2ba83f416db90324fc7e/C in TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:47,760 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/9d23ce0c89864264805d48608d3583d7, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/30c2209ead2e41edb53358ac240169c1, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/475c9a1c2a6047e791e4857ed4679ffe] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp, totalSize=36.8 K 2024-11-07T14:17:47,761 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9d23ce0c89864264805d48608d3583d7, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=361, earliestPutTs=1730989064272 2024-11-07T14:17:47,761 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 30c2209ead2e41edb53358ac240169c1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1730989065395 2024-11-07T14:17:47,761 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 475c9a1c2a6047e791e4857ed4679ffe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=401, earliestPutTs=1730989066542 2024-11-07T14:17:47,770 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 5a16e97064ea2ba83f416db90324fc7e#C#compaction#85 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:17:47,771 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/dac97a6d9ef144c38cadfb839ddacd3d is 50, key is test_row_0/C:col10/1730989066542/Put/seqid=0 2024-11-07T14:17:47,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741924_1100 (size=13221) 2024-11-07T14:17:47,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-07T14:17:47,794 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-11-07T14:17:47,796 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:17:47,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-11-07T14:17:47,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-07T14:17:47,798 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:17:47,799 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:17:47,799 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:17:47,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-07T14:17:47,951 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:47,952 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-07T14:17:47,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
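The master-side entries above (procId 24 completed, then pid=26 FlushTableProcedure spawning a pid=27 FlushRegionProcedure) are the result of the client requesting a table flush through the Admin API. A minimal sketch of that call, using the table name from the log and hypothetical connection setup:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; the master stores a
      // FlushTableProcedure and dispatches FlushRegionProcedure sub-procedures to
      // the regionservers, as in the pid=26 / pid=27 entries above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```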
2024-11-07T14:17:47,952 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing 5a16e97064ea2ba83f416db90324fc7e 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-07T14:17:47,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=A 2024-11-07T14:17:47,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:47,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=B 2024-11-07T14:17:47,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:47,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=C 2024-11-07T14:17:47,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:47,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/0c352207b6044b27a7e09083c13a9edd is 50, key is test_row_0/A:col10/1730989066851/Put/seqid=0 2024-11-07T14:17:47,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741925_1101 (size=12301) 2024-11-07T14:17:47,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:47,967 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. as already flushing 2024-11-07T14:17:47,999 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:47,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989127995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:47,999 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:47,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989127995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:48,001 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:48,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989127999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:48,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-07T14:17:48,102 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:48,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989128100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:48,103 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:48,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989128100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:48,103 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:48,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989128102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:48,164 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/c981ec2fde084132a7a9567c61cde1df as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/c981ec2fde084132a7a9567c61cde1df 2024-11-07T14:17:48,171 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a16e97064ea2ba83f416db90324fc7e/B of 5a16e97064ea2ba83f416db90324fc7e into c981ec2fde084132a7a9567c61cde1df(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
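The completion messages above show the system-selected minor compactions rewriting the three B-family (and C-family) HFiles into a single ~12.9 K file each. For comparison, a compaction can also be requested explicitly through the Admin API; a brief sketch (reusing an already-open Admin handle as in the earlier examples, with the family name taken from the log):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompaction {
  // Assumes an already-open Admin handle, as in the earlier sketches.
  static void compactFamilyB(Admin admin) throws java.io.IOException {
    // Queue a minor compaction of just the B family of the test table...
    admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("B"));
    // ...or ask for all eligible files in every store of the table to be rewritten.
    admin.majorCompact(TableName.valueOf("TestAcidGuarantees"));
  }
}
```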
2024-11-07T14:17:48,171 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:48,172 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e., storeName=5a16e97064ea2ba83f416db90324fc7e/B, priority=13, startTime=1730989067718; duration=0sec 2024-11-07T14:17:48,172 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:48,172 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a16e97064ea2ba83f416db90324fc7e:B 2024-11-07T14:17:48,184 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/dac97a6d9ef144c38cadfb839ddacd3d as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/dac97a6d9ef144c38cadfb839ddacd3d 2024-11-07T14:17:48,191 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a16e97064ea2ba83f416db90324fc7e/C of 5a16e97064ea2ba83f416db90324fc7e into dac97a6d9ef144c38cadfb839ddacd3d(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:17:48,191 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:48,191 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e., storeName=5a16e97064ea2ba83f416db90324fc7e/C, priority=13, startTime=1730989067720; duration=0sec 2024-11-07T14:17:48,191 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:48,191 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a16e97064ea2ba83f416db90324fc7e:C 2024-11-07T14:17:48,305 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:48,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989128304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:48,306 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:48,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989128304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:48,307 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:48,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989128305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:48,364 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=412 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/0c352207b6044b27a7e09083c13a9edd 2024-11-07T14:17:48,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/f7cb38d56fc842ffbc86f01ee595508c is 50, key is test_row_0/B:col10/1730989066851/Put/seqid=0 2024-11-07T14:17:48,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741926_1102 (size=12301) 2024-11-07T14:17:48,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-07T14:17:48,607 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:48,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989128606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:48,608 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:48,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989128607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:48,609 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:48,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989128609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:48,668 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:48,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45694 deadline: 1730989128667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:48,669 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:48,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45758 deadline: 1730989128667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:48,780 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=412 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/f7cb38d56fc842ffbc86f01ee595508c 2024-11-07T14:17:48,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/69e8eef6c040437cb11a6781245ed5ea is 50, key is test_row_0/C:col10/1730989066851/Put/seqid=0 2024-11-07T14:17:48,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741927_1103 (size=12301) 2024-11-07T14:17:48,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-07T14:17:49,111 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:49,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1730989129110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:49,112 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:49,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45686 deadline: 1730989129110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:49,113 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:49,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45710 deadline: 1730989129113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:49,208 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=412 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/69e8eef6c040437cb11a6781245ed5ea 2024-11-07T14:17:49,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/0c352207b6044b27a7e09083c13a9edd as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/0c352207b6044b27a7e09083c13a9edd 2024-11-07T14:17:49,220 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/0c352207b6044b27a7e09083c13a9edd, entries=150, sequenceid=412, filesize=12.0 K 2024-11-07T14:17:49,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/f7cb38d56fc842ffbc86f01ee595508c as 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/f7cb38d56fc842ffbc86f01ee595508c 2024-11-07T14:17:49,227 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/f7cb38d56fc842ffbc86f01ee595508c, entries=150, sequenceid=412, filesize=12.0 K 2024-11-07T14:17:49,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/69e8eef6c040437cb11a6781245ed5ea as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/69e8eef6c040437cb11a6781245ed5ea 2024-11-07T14:17:49,233 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/69e8eef6c040437cb11a6781245ed5ea, entries=150, sequenceid=412, filesize=12.0 K 2024-11-07T14:17:49,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.StoreScanner(992): StoreScanner already has the close lock. There is no need to updateReaders 2024-11-07T14:17:49,234 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 5a16e97064ea2ba83f416db90324fc7e in 1282ms, sequenceid=412, compaction requested=false 2024-11-07T14:17:49,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:49,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
2024-11-07T14:17:49,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-11-07T14:17:49,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-11-07T14:17:49,237 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-11-07T14:17:49,237 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4360 sec 2024-11-07T14:17:49,239 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 1.4420 sec 2024-11-07T14:17:49,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-07T14:17:49,902 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-11-07T14:17:49,903 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:17:49,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-11-07T14:17:49,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-07T14:17:49,905 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:17:49,906 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:17:49,907 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:17:49,962 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x12a1285d to 127.0.0.1:51818 2024-11-07T14:17:49,962 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:17:49,962 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x09ed28bb to 127.0.0.1:51818 2024-11-07T14:17:49,962 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:17:49,964 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x353bc462 to 127.0.0.1:51818 2024-11-07T14:17:49,964 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:17:49,964 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x47fe2fa7 to 127.0.0.1:51818 2024-11-07T14:17:49,964 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:17:50,006 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-07T14:17:50,058 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:50,059 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-07T14:17:50,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:50,059 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing 5a16e97064ea2ba83f416db90324fc7e 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-07T14:17:50,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=A 2024-11-07T14:17:50,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:50,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=B 2024-11-07T14:17:50,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:50,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=C 2024-11-07T14:17:50,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:50,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/b0f46b75b9304fe2adbeabb606eee2a9 is 50, key is test_row_0/A:col10/1730989067993/Put/seqid=0 2024-11-07T14:17:50,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741928_1104 (size=12301) 2024-11-07T14:17:50,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:50,121 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
as already flushing 2024-11-07T14:17:50,121 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f2091cc to 127.0.0.1:51818 2024-11-07T14:17:50,121 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0e52b42a to 127.0.0.1:51818 2024-11-07T14:17:50,121 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:17:50,121 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:17:50,123 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x18cb251d to 127.0.0.1:51818 2024-11-07T14:17:50,123 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:17:50,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-07T14:17:50,471 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=440 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/b0f46b75b9304fe2adbeabb606eee2a9 2024-11-07T14:17:50,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/b55f3562693a46b0901cd4cb42fa1939 is 50, key is test_row_0/B:col10/1730989067993/Put/seqid=0 2024-11-07T14:17:50,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741929_1105 (size=12301) 2024-11-07T14:17:50,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-07T14:17:50,680 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x45b55c24 to 127.0.0.1:51818 2024-11-07T14:17:50,680 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:17:50,690 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x09bd0964 to 127.0.0.1:51818 2024-11-07T14:17:50,690 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:17:50,885 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=440 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/b55f3562693a46b0901cd4cb42fa1939 2024-11-07T14:17:50,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/628d5b06c72740829061ebe95a62a633 is 50, key is test_row_0/C:col10/1730989067993/Put/seqid=0 2024-11-07T14:17:50,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741930_1106 (size=12301) 
2024-11-07T14:17:51,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-07T14:17:51,298 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=440 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/628d5b06c72740829061ebe95a62a633 2024-11-07T14:17:51,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/b0f46b75b9304fe2adbeabb606eee2a9 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/b0f46b75b9304fe2adbeabb606eee2a9 2024-11-07T14:17:51,309 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/b0f46b75b9304fe2adbeabb606eee2a9, entries=150, sequenceid=440, filesize=12.0 K 2024-11-07T14:17:51,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/b55f3562693a46b0901cd4cb42fa1939 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/b55f3562693a46b0901cd4cb42fa1939 2024-11-07T14:17:51,315 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/b55f3562693a46b0901cd4cb42fa1939, entries=150, sequenceid=440, filesize=12.0 K 2024-11-07T14:17:51,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/628d5b06c72740829061ebe95a62a633 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/628d5b06c72740829061ebe95a62a633 2024-11-07T14:17:51,320 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/628d5b06c72740829061ebe95a62a633, entries=150, sequenceid=440, filesize=12.0 K 2024-11-07T14:17:51,321 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] 
regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=33.54 KB/34350 for 5a16e97064ea2ba83f416db90324fc7e in 1262ms, sequenceid=440, compaction requested=true 2024-11-07T14:17:51,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:51,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:51,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-11-07T14:17:51,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-11-07T14:17:51,324 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-11-07T14:17:51,324 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4160 sec 2024-11-07T14:17:51,325 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 1.4210 sec 2024-11-07T14:17:52,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-07T14:17:52,010 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-11-07T14:17:52,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-07T14:17:52,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 64 2024-11-07T14:17:52,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 84 2024-11-07T14:17:52,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 65 2024-11-07T14:17:52,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 81 2024-11-07T14:17:52,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 58 2024-11-07T14:17:52,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-07T14:17:52,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5827 2024-11-07T14:17:52,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5895 2024-11-07T14:17:52,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-07T14:17:52,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2514 2024-11-07T14:17:52,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7541 rows 2024-11-07T14:17:52,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2545 2024-11-07T14:17:52,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7635 rows 2024-11-07T14:17:52,010 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-07T14:17:52,010 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6fcb5f29 to 127.0.0.1:51818 2024-11-07T14:17:52,010 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:17:52,016 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-07T14:17:52,020 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-07T14:17:52,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-07T14:17:52,027 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730989072027"}]},"ts":"1730989072027"} 2024-11-07T14:17:52,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-07T14:17:52,028 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-07T14:17:52,031 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-07T14:17:52,032 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-07T14:17:52,036 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=32, ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5a16e97064ea2ba83f416db90324fc7e, UNASSIGN}] 2024-11-07T14:17:52,037 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=32, ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=5a16e97064ea2ba83f416db90324fc7e, UNASSIGN 2024-11-07T14:17:52,037 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=5a16e97064ea2ba83f416db90324fc7e, regionState=CLOSING, regionLocation=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:52,038 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-07T14:17:52,039 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; CloseRegionProcedure 5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081}] 2024-11-07T14:17:52,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-07T14:17:52,193 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:52,195 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] handler.UnassignRegionHandler(124): Close 5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:52,195 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-07T14:17:52,196 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1681): Closing 5a16e97064ea2ba83f416db90324fc7e, disabling compactions & flushes 2024-11-07T14:17:52,196 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:52,196 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:52,196 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. after waiting 0 ms 2024-11-07T14:17:52,196 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 
2024-11-07T14:17:52,196 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(2837): Flushing 5a16e97064ea2ba83f416db90324fc7e 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-07T14:17:52,196 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=A 2024-11-07T14:17:52,196 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:52,196 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=B 2024-11-07T14:17:52,196 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:52,196 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a16e97064ea2ba83f416db90324fc7e, store=C 2024-11-07T14:17:52,196 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:52,200 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/7cce60e2472c4f509462cccbe3ef0e16 is 50, key is test_row_0/A:col10/1730989070678/Put/seqid=0 2024-11-07T14:17:52,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741931_1107 (size=12301) 2024-11-07T14:17:52,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-07T14:17:52,403 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-07T14:17:52,605 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=448 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/7cce60e2472c4f509462cccbe3ef0e16 2024-11-07T14:17:52,613 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/ed438761cd2e4e2991d1cbca622092c8 is 50, key is test_row_0/B:col10/1730989070678/Put/seqid=0 2024-11-07T14:17:52,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741932_1108 (size=12301) 2024-11-07T14:17:52,628 ERROR [LeaseRenewer:jenkins@localhost:34807 {}] server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins@localhost:34807,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null at org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:17:52,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-07T14:17:53,017 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=448 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/ed438761cd2e4e2991d1cbca622092c8 2024-11-07T14:17:53,025 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/190c1572834a492c8c60d1bbe2a50dc4 is 50, key is test_row_0/C:col10/1730989070678/Put/seqid=0 2024-11-07T14:17:53,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741933_1109 (size=12301) 2024-11-07T14:17:53,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-07T14:17:53,430 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=448 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/190c1572834a492c8c60d1bbe2a50dc4 2024-11-07T14:17:53,436 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/A/7cce60e2472c4f509462cccbe3ef0e16 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/7cce60e2472c4f509462cccbe3ef0e16 2024-11-07T14:17:53,441 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/7cce60e2472c4f509462cccbe3ef0e16, entries=150, sequenceid=448, filesize=12.0 K 2024-11-07T14:17:53,442 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/B/ed438761cd2e4e2991d1cbca622092c8 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/ed438761cd2e4e2991d1cbca622092c8 2024-11-07T14:17:53,446 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/ed438761cd2e4e2991d1cbca622092c8, entries=150, sequenceid=448, filesize=12.0 K 2024-11-07T14:17:53,447 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/.tmp/C/190c1572834a492c8c60d1bbe2a50dc4 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/190c1572834a492c8c60d1bbe2a50dc4 2024-11-07T14:17:53,451 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/190c1572834a492c8c60d1bbe2a50dc4, entries=150, sequenceid=448, filesize=12.0 K 2024-11-07T14:17:53,452 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 5a16e97064ea2ba83f416db90324fc7e in 1256ms, sequenceid=448, compaction requested=true 2024-11-07T14:17:53,453 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/796e2af9d0334fc39af0344e03d7f9f6, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/e7f0075ed0954288bf8d73fd35e29be8, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/80c512a7665542e0acbc7d118b45ab01, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/db00db09b197479daee8b01310fa4086, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/360f07f8cb1b4079a2641ba99ab6cd1d, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/1849150949354d9a8b8fc3dbd4623018, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/f516f1914b1a45b5a90e660b852f9549, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/9e814490b2d64edf9db0b0fc00d1b4fa, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/436981c5ec2f41aeae45d16137f26500, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/5d26bd7599674cadaf6d316b5a3e1d7a, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/eb692dd642c74d19832d657e23c8df74, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/69ae0789e6fb4814aec127e1ffefd57c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/bfedfb975a224805a0a26a0f9741f89f, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/3248e79ccd4b4c2d90e498a668d2ec5b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/14d0c1425be244a39be7b29f8abf94fd, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/2a49e9645aff469f9ce928def847f888, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/2dff8070276e4a1cb9b02044dd54b3d3, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/53b1d013bf1a48508c5aa4dc53717c6b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/68d3a8e95167422e852b008d4b979c5a, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/0e2d862bc983479e8ed33f55e9fc29b5, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/60b43c8945224fc6bc0abe36165f0895, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/eab722de001c4e29a38c165b58384521, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/703dbf5502c34276842850f13bc1d162, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/b853de66d18f4339b4c5bb1d14b38314, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/ee1d2d6b15c34bf1af8e2602eb1b55d8, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/b089d264503d4a2392bf3d89f831a678, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/9758aca211c44ee784de13dc72925586, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/dd03925e48b7474fba37f178eba35ca8] to archive 2024-11-07T14:17:53,457 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-07T14:17:53,463 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/796e2af9d0334fc39af0344e03d7f9f6 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/796e2af9d0334fc39af0344e03d7f9f6 2024-11-07T14:17:53,464 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/e7f0075ed0954288bf8d73fd35e29be8 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/e7f0075ed0954288bf8d73fd35e29be8 2024-11-07T14:17:53,466 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/80c512a7665542e0acbc7d118b45ab01 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/80c512a7665542e0acbc7d118b45ab01 2024-11-07T14:17:53,467 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/db00db09b197479daee8b01310fa4086 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/db00db09b197479daee8b01310fa4086 2024-11-07T14:17:53,468 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/360f07f8cb1b4079a2641ba99ab6cd1d to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/360f07f8cb1b4079a2641ba99ab6cd1d 2024-11-07T14:17:53,470 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/1849150949354d9a8b8fc3dbd4623018 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/1849150949354d9a8b8fc3dbd4623018 2024-11-07T14:17:53,471 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/f516f1914b1a45b5a90e660b852f9549 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/f516f1914b1a45b5a90e660b852f9549 2024-11-07T14:17:53,472 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/9e814490b2d64edf9db0b0fc00d1b4fa to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/9e814490b2d64edf9db0b0fc00d1b4fa 2024-11-07T14:17:53,473 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/436981c5ec2f41aeae45d16137f26500 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/436981c5ec2f41aeae45d16137f26500 2024-11-07T14:17:53,475 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/5d26bd7599674cadaf6d316b5a3e1d7a to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/5d26bd7599674cadaf6d316b5a3e1d7a 2024-11-07T14:17:53,476 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/eb692dd642c74d19832d657e23c8df74 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/eb692dd642c74d19832d657e23c8df74 2024-11-07T14:17:53,477 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/69ae0789e6fb4814aec127e1ffefd57c to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/69ae0789e6fb4814aec127e1ffefd57c 2024-11-07T14:17:53,478 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/bfedfb975a224805a0a26a0f9741f89f to 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/bfedfb975a224805a0a26a0f9741f89f 2024-11-07T14:17:53,479 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/3248e79ccd4b4c2d90e498a668d2ec5b to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/3248e79ccd4b4c2d90e498a668d2ec5b 2024-11-07T14:17:53,480 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/14d0c1425be244a39be7b29f8abf94fd to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/14d0c1425be244a39be7b29f8abf94fd 2024-11-07T14:17:53,482 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/2a49e9645aff469f9ce928def847f888 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/2a49e9645aff469f9ce928def847f888 2024-11-07T14:17:53,483 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/2dff8070276e4a1cb9b02044dd54b3d3 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/2dff8070276e4a1cb9b02044dd54b3d3 2024-11-07T14:17:53,484 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/53b1d013bf1a48508c5aa4dc53717c6b to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/53b1d013bf1a48508c5aa4dc53717c6b 2024-11-07T14:17:53,485 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/68d3a8e95167422e852b008d4b979c5a to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/68d3a8e95167422e852b008d4b979c5a 2024-11-07T14:17:53,486 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/0e2d862bc983479e8ed33f55e9fc29b5 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/0e2d862bc983479e8ed33f55e9fc29b5 2024-11-07T14:17:53,488 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/60b43c8945224fc6bc0abe36165f0895 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/60b43c8945224fc6bc0abe36165f0895 2024-11-07T14:17:53,489 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/eab722de001c4e29a38c165b58384521 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/eab722de001c4e29a38c165b58384521 2024-11-07T14:17:53,490 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/703dbf5502c34276842850f13bc1d162 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/703dbf5502c34276842850f13bc1d162 2024-11-07T14:17:53,491 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/b853de66d18f4339b4c5bb1d14b38314 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/b853de66d18f4339b4c5bb1d14b38314 2024-11-07T14:17:53,492 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/ee1d2d6b15c34bf1af8e2602eb1b55d8 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/ee1d2d6b15c34bf1af8e2602eb1b55d8 2024-11-07T14:17:53,494 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/b089d264503d4a2392bf3d89f831a678 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/b089d264503d4a2392bf3d89f831a678 2024-11-07T14:17:53,495 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/9758aca211c44ee784de13dc72925586 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/9758aca211c44ee784de13dc72925586 2024-11-07T14:17:53,496 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/dd03925e48b7474fba37f178eba35ca8 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/dd03925e48b7474fba37f178eba35ca8 2024-11-07T14:17:53,510 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/2e20cb1039314b609e9f7c65b5019328, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/7bb990414f01430881cc443124c0440c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/39eb218744e3469f9e35127787601ebc, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/f572a7ea75e64b3aa6c565ad28077dd5, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/552b54050d4647c5889b3c1978d53940, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/94f77e31d5e34141ad44250912ba8c75, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/f14d3fca9e994edda7b602f0e71a2647, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/24b34518526944dea362474412cec540, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/33e17e4d37d049a5a54f8363981365db, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/e93f2df5ec7b41948934bce0a44bd4d9, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/212b74391879409bba3aca3671ef9f1e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/731a0687a4a941e89f109c3d51308e75, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/174e4565fa204e24b2775b8329c4a61d, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/dbb6e569365f47e29e2f13e59d5eb66e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/4160d6f028044bb4998d37a44a3f7699, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/42ead432e7ea49b18507cf7810db8d42, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/eadb7bea8db44e56ba145ba6230c7d59, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/67100ea7d624484ead92ae661508a97a, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/4e0ad9b8b90b41cfba8c3776fc2ed0ae, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/ef6916e1510b4ed9821569b2ae4ee84c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/ac897d0a5dbb4c878ae012056379ab35, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/33466e9a1f114ade98453edb06f67dcc, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/d90dc46c2cb34381a8f6eb36a7c5c328, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/95202dea5d374a498a10beb23279e8a8, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/19754ba997914d4281c5e76bc8595eca, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/25dc626570d7499ab24fdbaf8f3999fb, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/5983bbf02e5a49a0b9a7c8c86cacaaee, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/9ac5fffd1d5b494daca19a0f7639ba33] to archive 2024-11-07T14:17:53,511 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-07T14:17:53,513 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/2e20cb1039314b609e9f7c65b5019328 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/2e20cb1039314b609e9f7c65b5019328 2024-11-07T14:17:53,514 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/7bb990414f01430881cc443124c0440c to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/7bb990414f01430881cc443124c0440c 2024-11-07T14:17:53,516 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/39eb218744e3469f9e35127787601ebc to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/39eb218744e3469f9e35127787601ebc 2024-11-07T14:17:53,517 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/f572a7ea75e64b3aa6c565ad28077dd5 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/f572a7ea75e64b3aa6c565ad28077dd5 2024-11-07T14:17:53,518 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/552b54050d4647c5889b3c1978d53940 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/552b54050d4647c5889b3c1978d53940 2024-11-07T14:17:53,519 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/94f77e31d5e34141ad44250912ba8c75 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/94f77e31d5e34141ad44250912ba8c75 2024-11-07T14:17:53,521 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/f14d3fca9e994edda7b602f0e71a2647 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/f14d3fca9e994edda7b602f0e71a2647 2024-11-07T14:17:53,522 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/24b34518526944dea362474412cec540 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/24b34518526944dea362474412cec540 2024-11-07T14:17:53,523 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/33e17e4d37d049a5a54f8363981365db to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/33e17e4d37d049a5a54f8363981365db 2024-11-07T14:17:53,524 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/e93f2df5ec7b41948934bce0a44bd4d9 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/e93f2df5ec7b41948934bce0a44bd4d9 2024-11-07T14:17:53,525 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/212b74391879409bba3aca3671ef9f1e to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/212b74391879409bba3aca3671ef9f1e 2024-11-07T14:17:53,527 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/731a0687a4a941e89f109c3d51308e75 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/731a0687a4a941e89f109c3d51308e75 2024-11-07T14:17:53,528 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/174e4565fa204e24b2775b8329c4a61d to 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/174e4565fa204e24b2775b8329c4a61d 2024-11-07T14:17:53,529 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/dbb6e569365f47e29e2f13e59d5eb66e to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/dbb6e569365f47e29e2f13e59d5eb66e 2024-11-07T14:17:53,530 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/4160d6f028044bb4998d37a44a3f7699 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/4160d6f028044bb4998d37a44a3f7699 2024-11-07T14:17:53,531 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/42ead432e7ea49b18507cf7810db8d42 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/42ead432e7ea49b18507cf7810db8d42 2024-11-07T14:17:53,532 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/eadb7bea8db44e56ba145ba6230c7d59 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/eadb7bea8db44e56ba145ba6230c7d59 2024-11-07T14:17:53,533 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/67100ea7d624484ead92ae661508a97a to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/67100ea7d624484ead92ae661508a97a 2024-11-07T14:17:53,534 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/4e0ad9b8b90b41cfba8c3776fc2ed0ae to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/4e0ad9b8b90b41cfba8c3776fc2ed0ae 2024-11-07T14:17:53,535 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/ef6916e1510b4ed9821569b2ae4ee84c to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/ef6916e1510b4ed9821569b2ae4ee84c 2024-11-07T14:17:53,537 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/ac897d0a5dbb4c878ae012056379ab35 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/ac897d0a5dbb4c878ae012056379ab35 2024-11-07T14:17:53,538 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/33466e9a1f114ade98453edb06f67dcc to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/33466e9a1f114ade98453edb06f67dcc 2024-11-07T14:17:53,539 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/d90dc46c2cb34381a8f6eb36a7c5c328 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/d90dc46c2cb34381a8f6eb36a7c5c328 2024-11-07T14:17:53,540 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/95202dea5d374a498a10beb23279e8a8 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/95202dea5d374a498a10beb23279e8a8 2024-11-07T14:17:53,541 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/19754ba997914d4281c5e76bc8595eca to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/19754ba997914d4281c5e76bc8595eca 2024-11-07T14:17:53,542 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/25dc626570d7499ab24fdbaf8f3999fb to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/25dc626570d7499ab24fdbaf8f3999fb 2024-11-07T14:17:53,543 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/5983bbf02e5a49a0b9a7c8c86cacaaee to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/5983bbf02e5a49a0b9a7c8c86cacaaee 2024-11-07T14:17:53,544 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/9ac5fffd1d5b494daca19a0f7639ba33 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/9ac5fffd1d5b494daca19a0f7639ba33 2024-11-07T14:17:53,546 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/a35997f314414218acc6247d02e434ca, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/44f145425e764e4ab656989d4140db3b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/cebf6a695fae43fb8dda0ace61312de9, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/79bfd461eb9e4e49a339530fa63c01d0, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/10edcd64df8343268dcc2e35529f938d, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/f3aa893977a7471bb00db3d47dc24f2f, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/a49d339e4f3745d2833f0b8a2fad9861, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/377e01d97702478eaecaf85d8ff5aeef, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/b1c13821730149fc88e22749e21978f9, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/4290451b829d4da8b2a58a1353c5df96, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/56f8ef401b7f4ebb9d5957edda0c752c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/db84c37e775a4fa19f9f8390fa8aee77, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/f5454a88e7f547babfc65fd29fe64547, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/ef05161a23db4647bde0c6be876f68e9, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/4d15a9d101444bffa9e535339b0a8c87, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/6884a53971644e6da4b4442b3264c50c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/2c7eacd8ce8f4fd9a1a75701e0a89a4b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/640e60cc3f9f41538332554f8887bc14, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/248502cd209746a3ba90733718e24d38, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/310bee2df51b493c8b8df646a062464f, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/a6fa5f568a7b426c87e71e72841b4b8b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/4fcc855f403743229527a9e9690cdde5, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/e7ae01c4efc5408e8824f3d29c96e287, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/9d23ce0c89864264805d48608d3583d7, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/d88cfd67b4ce4e698f8e85733c7625bd, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/30c2209ead2e41edb53358ac240169c1, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/475c9a1c2a6047e791e4857ed4679ffe] to archive 2024-11-07T14:17:53,547 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-07T14:17:53,548 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/a35997f314414218acc6247d02e434ca to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/a35997f314414218acc6247d02e434ca 2024-11-07T14:17:53,550 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/44f145425e764e4ab656989d4140db3b to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/44f145425e764e4ab656989d4140db3b 2024-11-07T14:17:53,551 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/cebf6a695fae43fb8dda0ace61312de9 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/cebf6a695fae43fb8dda0ace61312de9 2024-11-07T14:17:53,552 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/79bfd461eb9e4e49a339530fa63c01d0 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/79bfd461eb9e4e49a339530fa63c01d0 2024-11-07T14:17:53,554 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/10edcd64df8343268dcc2e35529f938d to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/10edcd64df8343268dcc2e35529f938d 2024-11-07T14:17:53,555 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/f3aa893977a7471bb00db3d47dc24f2f to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/f3aa893977a7471bb00db3d47dc24f2f 2024-11-07T14:17:53,556 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/a49d339e4f3745d2833f0b8a2fad9861 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/a49d339e4f3745d2833f0b8a2fad9861 2024-11-07T14:17:53,557 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/377e01d97702478eaecaf85d8ff5aeef to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/377e01d97702478eaecaf85d8ff5aeef 2024-11-07T14:17:53,558 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/b1c13821730149fc88e22749e21978f9 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/b1c13821730149fc88e22749e21978f9 2024-11-07T14:17:53,560 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/4290451b829d4da8b2a58a1353c5df96 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/4290451b829d4da8b2a58a1353c5df96 2024-11-07T14:17:53,561 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/56f8ef401b7f4ebb9d5957edda0c752c to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/56f8ef401b7f4ebb9d5957edda0c752c 2024-11-07T14:17:53,562 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/db84c37e775a4fa19f9f8390fa8aee77 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/db84c37e775a4fa19f9f8390fa8aee77 2024-11-07T14:17:53,563 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/f5454a88e7f547babfc65fd29fe64547 to 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/f5454a88e7f547babfc65fd29fe64547 2024-11-07T14:17:53,564 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/ef05161a23db4647bde0c6be876f68e9 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/ef05161a23db4647bde0c6be876f68e9 2024-11-07T14:17:53,565 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/4d15a9d101444bffa9e535339b0a8c87 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/4d15a9d101444bffa9e535339b0a8c87 2024-11-07T14:17:53,567 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/6884a53971644e6da4b4442b3264c50c to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/6884a53971644e6da4b4442b3264c50c 2024-11-07T14:17:53,568 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/2c7eacd8ce8f4fd9a1a75701e0a89a4b to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/2c7eacd8ce8f4fd9a1a75701e0a89a4b 2024-11-07T14:17:53,569 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/640e60cc3f9f41538332554f8887bc14 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/640e60cc3f9f41538332554f8887bc14 2024-11-07T14:17:53,570 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/248502cd209746a3ba90733718e24d38 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/248502cd209746a3ba90733718e24d38 2024-11-07T14:17:53,571 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/310bee2df51b493c8b8df646a062464f to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/310bee2df51b493c8b8df646a062464f 2024-11-07T14:17:53,572 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/a6fa5f568a7b426c87e71e72841b4b8b to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/a6fa5f568a7b426c87e71e72841b4b8b 2024-11-07T14:17:53,574 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/4fcc855f403743229527a9e9690cdde5 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/4fcc855f403743229527a9e9690cdde5 2024-11-07T14:17:53,575 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/e7ae01c4efc5408e8824f3d29c96e287 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/e7ae01c4efc5408e8824f3d29c96e287 2024-11-07T14:17:53,576 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/9d23ce0c89864264805d48608d3583d7 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/9d23ce0c89864264805d48608d3583d7 2024-11-07T14:17:53,578 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/d88cfd67b4ce4e698f8e85733c7625bd to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/d88cfd67b4ce4e698f8e85733c7625bd 2024-11-07T14:17:53,579 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/30c2209ead2e41edb53358ac240169c1 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/30c2209ead2e41edb53358ac240169c1 2024-11-07T14:17:53,580 DEBUG [StoreCloser-TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/475c9a1c2a6047e791e4857ed4679ffe to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/475c9a1c2a6047e791e4857ed4679ffe 2024-11-07T14:17:53,584 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/recovered.edits/451.seqid, newMaxSeqId=451, maxSeqId=1 2024-11-07T14:17:53,587 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e. 2024-11-07T14:17:53,587 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1635): Region close journal for 5a16e97064ea2ba83f416db90324fc7e: 2024-11-07T14:17:53,589 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] handler.UnassignRegionHandler(170): Closed 5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:53,589 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=5a16e97064ea2ba83f416db90324fc7e, regionState=CLOSED 2024-11-07T14:17:53,591 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-11-07T14:17:53,592 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; CloseRegionProcedure 5a16e97064ea2ba83f416db90324fc7e, server=69430dbfd73f,45917,1730989044081 in 1.5510 sec 2024-11-07T14:17:53,593 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=32, resume processing ppid=31 2024-11-07T14:17:53,593 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, ppid=31, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=5a16e97064ea2ba83f416db90324fc7e, UNASSIGN in 1.5550 sec 2024-11-07T14:17:53,595 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-11-07T14:17:53,595 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5610 sec 2024-11-07T14:17:53,596 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730989073596"}]},"ts":"1730989073596"} 2024-11-07T14:17:53,597 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-07T14:17:53,599 INFO 
[PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-07T14:17:53,600 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5770 sec 2024-11-07T14:17:53,828 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-07T14:17:54,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-07T14:17:54,132 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-11-07T14:17:54,135 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-07T14:17:54,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:17:54,141 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=34, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:17:54,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-07T14:17:54,142 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=34, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:17:54,146 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:54,150 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A, FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B, FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C, FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/recovered.edits] 2024-11-07T14:17:54,154 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/0c352207b6044b27a7e09083c13a9edd to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/0c352207b6044b27a7e09083c13a9edd 2024-11-07T14:17:54,155 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/2c875577d6df47ac80bfcf0d36293106 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/2c875577d6df47ac80bfcf0d36293106 2024-11-07T14:17:54,156 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/7cce60e2472c4f509462cccbe3ef0e16 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/7cce60e2472c4f509462cccbe3ef0e16 2024-11-07T14:17:54,158 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/b0f46b75b9304fe2adbeabb606eee2a9 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/A/b0f46b75b9304fe2adbeabb606eee2a9 2024-11-07T14:17:54,160 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/b55f3562693a46b0901cd4cb42fa1939 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/b55f3562693a46b0901cd4cb42fa1939 2024-11-07T14:17:54,162 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/c981ec2fde084132a7a9567c61cde1df to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/c981ec2fde084132a7a9567c61cde1df 2024-11-07T14:17:54,163 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/ed438761cd2e4e2991d1cbca622092c8 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/ed438761cd2e4e2991d1cbca622092c8 2024-11-07T14:17:54,165 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/f7cb38d56fc842ffbc86f01ee595508c to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/B/f7cb38d56fc842ffbc86f01ee595508c 2024-11-07T14:17:54,167 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/190c1572834a492c8c60d1bbe2a50dc4 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/190c1572834a492c8c60d1bbe2a50dc4 2024-11-07T14:17:54,168 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/628d5b06c72740829061ebe95a62a633 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/628d5b06c72740829061ebe95a62a633 2024-11-07T14:17:54,170 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/69e8eef6c040437cb11a6781245ed5ea to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/69e8eef6c040437cb11a6781245ed5ea 2024-11-07T14:17:54,171 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/dac97a6d9ef144c38cadfb839ddacd3d to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/C/dac97a6d9ef144c38cadfb839ddacd3d 2024-11-07T14:17:54,174 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/recovered.edits/451.seqid to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e/recovered.edits/451.seqid 2024-11-07T14:17:54,174 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/5a16e97064ea2ba83f416db90324fc7e 2024-11-07T14:17:54,174 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-07T14:17:54,179 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=34, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:17:54,184 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-07T14:17:54,187 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-07T14:17:54,218 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 
2024-11-07T14:17:54,220 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=34, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:17:54,220 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-07T14:17:54,220 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1730989074220"}]},"ts":"9223372036854775807"} 2024-11-07T14:17:54,223 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-07T14:17:54,223 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 5a16e97064ea2ba83f416db90324fc7e, NAME => 'TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e.', STARTKEY => '', ENDKEY => ''}] 2024-11-07T14:17:54,223 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-07T14:17:54,223 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1730989074223"}]},"ts":"9223372036854775807"} 2024-11-07T14:17:54,226 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-07T14:17:54,228 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=34, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:17:54,229 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 92 msec 2024-11-07T14:17:54,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-07T14:17:54,243 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 34 completed 2024-11-07T14:17:54,254 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMixedAtomicity Thread=238 (was 219) Potentially hanging thread: RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;69430dbfd73f:45917-shortCompactions-0 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1443265777_22 at /127.0.0.1:40022 [Waiting for operation #314] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x10f14607-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x10f14607-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: hconnection-0x10f14607-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_46215851_22 at /127.0.0.1:37530 [Waiting for operation #291] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x10f14607-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=455 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=436 (was 325) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6283 (was 5966) - AvailableMemoryMB LEAK? - 2024-11-07T14:17:54,263 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobMixedAtomicity Thread=238, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=436, ProcessCount=11, AvailableMemoryMB=6283 2024-11-07T14:17:54,265 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-07T14:17:54,265 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T14:17:54,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=35, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-07T14:17:54,267 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-07T14:17:54,268 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:17:54,268 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 35 2024-11-07T14:17:54,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-11-07T14:17:54,268 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-07T14:17:54,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741934_1110 (size=960) 2024-11-07T14:17:54,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-11-07T14:17:54,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-11-07T14:17:54,677 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8 2024-11-07T14:17:54,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741935_1111 (size=53) 2024-11-07T14:17:54,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-11-07T14:17:55,084 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T14:17:55,084 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing a97b90143d56ce006ffcb227cc121b11, disabling compactions & flushes 2024-11-07T14:17:55,084 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:55,084 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:55,084 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. after waiting 0 ms 2024-11-07T14:17:55,084 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:55,084 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
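The descriptor printed with the create request above (families A, B and C, one version each, BASIC in-memory compaction, 131072-byte flush size) could be reproduced roughly as follows with the HBase 2.x Java client; this is a hedged sketch rather than the test's own setup code:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                TableDescriptorBuilder builder =
                    TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                        // Table-level attribute shown in the TABLE_ATTRIBUTES/METADATA block above.
                        .setValue("hbase.hregion.compacting.memstore.type", "BASIC")
                        // Deliberately small flush size (131072 bytes); this is what trips the
                        // TableDescriptorChecker MEMSTORE_FLUSHSIZE warning in the log.
                        .setMemStoreFlushSize(131072L);
                for (String family : new String[] { "A", "B", "C" }) {
                    builder.setColumnFamily(
                        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                            .setMaxVersions(1)   // VERSIONS => '1' in the logged descriptor
                            .build());
                }
                admin.createTable(builder.build());   // stored as CreateTableProcedure, pid=35
            }
        }
    }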
2024-11-07T14:17:55,084 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:17:55,086 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-07T14:17:55,086 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1730989075086"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730989075086"}]},"ts":"1730989075086"} 2024-11-07T14:17:55,088 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-07T14:17:55,088 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-07T14:17:55,089 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730989075089"}]},"ts":"1730989075089"} 2024-11-07T14:17:55,090 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-07T14:17:55,095 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a97b90143d56ce006ffcb227cc121b11, ASSIGN}] 2024-11-07T14:17:55,096 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a97b90143d56ce006ffcb227cc121b11, ASSIGN 2024-11-07T14:17:55,096 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=a97b90143d56ce006ffcb227cc121b11, ASSIGN; state=OFFLINE, location=69430dbfd73f,45917,1730989044081; forceNewPlan=false, retain=false 2024-11-07T14:17:55,247 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=a97b90143d56ce006ffcb227cc121b11, regionState=OPENING, regionLocation=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:55,249 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE; OpenRegionProcedure a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081}] 2024-11-07T14:17:55,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-11-07T14:17:55,401 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:55,404 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
2024-11-07T14:17:55,404 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(7285): Opening region: {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} 2024-11-07T14:17:55,405 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:17:55,405 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T14:17:55,405 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(7327): checking encryption for a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:17:55,405 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(7330): checking classloading for a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:17:55,406 INFO [StoreOpener-a97b90143d56ce006ffcb227cc121b11-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:17:55,408 INFO [StoreOpener-a97b90143d56ce006ffcb227cc121b11-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T14:17:55,408 INFO [StoreOpener-a97b90143d56ce006ffcb227cc121b11-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a97b90143d56ce006ffcb227cc121b11 columnFamilyName A 2024-11-07T14:17:55,408 DEBUG [StoreOpener-a97b90143d56ce006ffcb227cc121b11-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:17:55,409 INFO [StoreOpener-a97b90143d56ce006ffcb227cc121b11-1 {}] regionserver.HStore(327): Store=a97b90143d56ce006ffcb227cc121b11/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T14:17:55,409 INFO [StoreOpener-a97b90143d56ce006ffcb227cc121b11-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:17:55,410 INFO [StoreOpener-a97b90143d56ce006ffcb227cc121b11-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T14:17:55,411 INFO [StoreOpener-a97b90143d56ce006ffcb227cc121b11-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a97b90143d56ce006ffcb227cc121b11 columnFamilyName B 2024-11-07T14:17:55,411 DEBUG [StoreOpener-a97b90143d56ce006ffcb227cc121b11-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:17:55,411 INFO [StoreOpener-a97b90143d56ce006ffcb227cc121b11-1 {}] regionserver.HStore(327): Store=a97b90143d56ce006ffcb227cc121b11/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T14:17:55,411 INFO [StoreOpener-a97b90143d56ce006ffcb227cc121b11-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:17:55,412 INFO [StoreOpener-a97b90143d56ce006ffcb227cc121b11-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T14:17:55,412 INFO [StoreOpener-a97b90143d56ce006ffcb227cc121b11-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a97b90143d56ce006ffcb227cc121b11 columnFamilyName C 2024-11-07T14:17:55,413 DEBUG [StoreOpener-a97b90143d56ce006ffcb227cc121b11-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:17:55,413 INFO [StoreOpener-a97b90143d56ce006ffcb227cc121b11-1 {}] regionserver.HStore(327): Store=a97b90143d56ce006ffcb227cc121b11/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T14:17:55,413 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:55,414 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:17:55,414 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:17:55,416 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T14:17:55,417 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1085): writing seq id for a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:17:55,419 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T14:17:55,419 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1102): Opened a97b90143d56ce006ffcb227cc121b11; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59219216, jitterRate=-0.11756491661071777}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T14:17:55,420 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1001): Region open journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:17:55,421 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11., pid=37, masterSystemTime=1730989075400 2024-11-07T14:17:55,422 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:55,422 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
2024-11-07T14:17:55,423 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=a97b90143d56ce006ffcb227cc121b11, regionState=OPEN, openSeqNum=2, regionLocation=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:55,425 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-11-07T14:17:55,425 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; OpenRegionProcedure a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 in 175 msec 2024-11-07T14:17:55,426 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=36, resume processing ppid=35 2024-11-07T14:17:55,427 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a97b90143d56ce006ffcb227cc121b11, ASSIGN in 330 msec 2024-11-07T14:17:55,427 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-07T14:17:55,427 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730989075427"}]},"ts":"1730989075427"} 2024-11-07T14:17:55,428 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-07T14:17:55,432 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-07T14:17:55,433 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1670 sec 2024-11-07T14:17:56,153 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-07T14:17:56,154 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57252, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-07T14:17:56,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-11-07T14:17:56,374 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 35 completed 2024-11-07T14:17:56,375 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x04ddf4c3 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@ff872d8 2024-11-07T14:17:56,379 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4506927, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:17:56,381 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:17:56,383 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:33420, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:17:56,384 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-07T14:17:56,386 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57258, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-07T14:17:56,391 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-07T14:17:56,392 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T14:17:56,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=38, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-07T14:17:56,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741936_1112 (size=996) 2024-11-07T14:17:56,483 
ERROR [LeaseRenewer:jenkins.hfs.0@localhost:34807 {}] server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins.hfs.0@localhost:34807,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null at org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:56,811 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-07T14:17:56,811 INFO [PEWorker-1 {}] util.FSTableDescriptors(297): Updated tableinfo=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-07T14:17:56,815 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-07T14:17:56,823 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a97b90143d56ce006ffcb227cc121b11, REOPEN/MOVE}] 2024-11-07T14:17:56,824 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a97b90143d56ce006ffcb227cc121b11, REOPEN/MOVE 2024-11-07T14:17:56,825 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=a97b90143d56ce006ffcb227cc121b11, regionState=CLOSING, regionLocation=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:56,826 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-07T14:17:56,826 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE; CloseRegionProcedure a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081}] 2024-11-07T14:17:56,978 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:56,978 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(124): Close a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:17:56,978 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-07T14:17:56,978 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, 
pid=41}] regionserver.HRegion(1681): Closing a97b90143d56ce006ffcb227cc121b11, disabling compactions & flushes 2024-11-07T14:17:56,978 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:56,978 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:56,978 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. after waiting 0 ms 2024-11-07T14:17:56,978 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:56,983 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-07T14:17:56,983 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:56,983 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1635): Region close journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:17:56,983 WARN [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegionServer(3786): Not adding moved region record: a97b90143d56ce006ffcb227cc121b11 to self. 
2024-11-07T14:17:56,985 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(170): Closed a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:17:56,985 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=a97b90143d56ce006ffcb227cc121b11, regionState=CLOSED 2024-11-07T14:17:56,987 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=41, resume processing ppid=40 2024-11-07T14:17:56,987 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, ppid=40, state=SUCCESS; CloseRegionProcedure a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 in 160 msec 2024-11-07T14:17:56,988 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=a97b90143d56ce006ffcb227cc121b11, REOPEN/MOVE; state=CLOSED, location=69430dbfd73f,45917,1730989044081; forceNewPlan=false, retain=true 2024-11-07T14:17:57,138 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=a97b90143d56ce006ffcb227cc121b11, regionState=OPENING, regionLocation=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:57,140 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=40, state=RUNNABLE; OpenRegionProcedure a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081}] 2024-11-07T14:17:57,292 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:57,295 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
2024-11-07T14:17:57,295 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(7285): Opening region: {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} 2024-11-07T14:17:57,296 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:17:57,296 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T14:17:57,296 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(7327): checking encryption for a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:17:57,296 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(7330): checking classloading for a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:17:57,300 INFO [StoreOpener-a97b90143d56ce006ffcb227cc121b11-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:17:57,301 INFO [StoreOpener-a97b90143d56ce006ffcb227cc121b11-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T14:17:57,306 INFO [StoreOpener-a97b90143d56ce006ffcb227cc121b11-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a97b90143d56ce006ffcb227cc121b11 columnFamilyName A 2024-11-07T14:17:57,308 DEBUG [StoreOpener-a97b90143d56ce006ffcb227cc121b11-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:17:57,309 INFO [StoreOpener-a97b90143d56ce006ffcb227cc121b11-1 {}] regionserver.HStore(327): Store=a97b90143d56ce006ffcb227cc121b11/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T14:17:57,309 INFO [StoreOpener-a97b90143d56ce006ffcb227cc121b11-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:17:57,310 INFO [StoreOpener-a97b90143d56ce006ffcb227cc121b11-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T14:17:57,310 INFO [StoreOpener-a97b90143d56ce006ffcb227cc121b11-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a97b90143d56ce006ffcb227cc121b11 columnFamilyName B 2024-11-07T14:17:57,310 DEBUG [StoreOpener-a97b90143d56ce006ffcb227cc121b11-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:17:57,310 INFO [StoreOpener-a97b90143d56ce006ffcb227cc121b11-1 {}] regionserver.HStore(327): Store=a97b90143d56ce006ffcb227cc121b11/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T14:17:57,311 INFO [StoreOpener-a97b90143d56ce006ffcb227cc121b11-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:17:57,311 INFO [StoreOpener-a97b90143d56ce006ffcb227cc121b11-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T14:17:57,311 INFO [StoreOpener-a97b90143d56ce006ffcb227cc121b11-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a97b90143d56ce006ffcb227cc121b11 columnFamilyName C 2024-11-07T14:17:57,311 DEBUG [StoreOpener-a97b90143d56ce006ffcb227cc121b11-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:17:57,312 INFO [StoreOpener-a97b90143d56ce006ffcb227cc121b11-1 {}] regionserver.HStore(327): Store=a97b90143d56ce006ffcb227cc121b11/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T14:17:57,312 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:57,312 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:17:57,313 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:17:57,315 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T14:17:57,316 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1085): writing seq id for a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:17:57,317 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1102): Opened a97b90143d56ce006ffcb227cc121b11; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73537301, jitterRate=0.09579117596149445}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T14:17:57,319 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1001): Region open journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:17:57,319 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11., pid=42, masterSystemTime=1730989077292 2024-11-07T14:17:57,321 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:57,321 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
2024-11-07T14:17:57,321 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=a97b90143d56ce006ffcb227cc121b11, regionState=OPEN, openSeqNum=5, regionLocation=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:57,324 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=40 2024-11-07T14:17:57,324 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=40, state=SUCCESS; OpenRegionProcedure a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 in 183 msec 2024-11-07T14:17:57,325 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=39 2024-11-07T14:17:57,325 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=39, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a97b90143d56ce006ffcb227cc121b11, REOPEN/MOVE in 501 msec 2024-11-07T14:17:57,328 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-11-07T14:17:57,328 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 512 msec 2024-11-07T14:17:57,330 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 934 msec 2024-11-07T14:17:57,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-07T14:17:57,338 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7362d978 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7cae6c5c 2024-11-07T14:17:57,344 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c7d6279, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:17:57,346 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7bad2e85 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5c820ef9 2024-11-07T14:17:57,349 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b55744e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:17:57,350 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7ebda6ad to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@b44b1e5 2024-11-07T14:17:57,354 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@454f1431, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:17:57,355 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x19a533a3 to 
127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@42e904d8 2024-11-07T14:17:57,358 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@769942d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:17:57,359 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x465dc764 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@a4c53ed 2024-11-07T14:17:57,362 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@367f47f7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:17:57,363 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x68f0be85 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@247c0c93 2024-11-07T14:17:57,367 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22e911df, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:17:57,368 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x152377d4 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@517ff977 2024-11-07T14:17:57,374 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b727d6e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:17:57,376 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1a52344f to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3448d233 2024-11-07T14:17:57,380 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c7940d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:17:57,381 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x08ba8425 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7a11164b 2024-11-07T14:17:57,384 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c38ee58, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:17:57,387 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:17:57,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees 2024-11-07T14:17:57,388 DEBUG [hconnection-0x2cb28f91-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:17:57,389 DEBUG [hconnection-0x1752ebcf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:17:57,389 DEBUG [hconnection-0x719c3a94-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:17:57,389 DEBUG [hconnection-0x67e04fdd-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:17:57,390 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:17:57,390 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33454, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:17:57,391 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33436, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:17:57,391 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33438, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:17:57,391 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:17:57,391 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:17:57,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-07T14:17:57,394 DEBUG [hconnection-0x5f17641-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:17:57,395 DEBUG [hconnection-0x4af473e0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:17:57,396 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33458, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:17:57,396 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33456, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:17:57,396 DEBUG [hconnection-0x3ea3f6bf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-11-07T14:17:57,397 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33472, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:17:57,397 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33480, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:17:57,399 DEBUG [hconnection-0xc353fb5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:17:57,401 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33486, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:17:57,403 DEBUG [hconnection-0x52ee7987-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:17:57,405 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33498, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:17:57,408 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a97b90143d56ce006ffcb227cc121b11 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-07T14:17:57,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:17:57,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=A 2024-11-07T14:17:57,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:57,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=B 2024-11-07T14:17:57,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:57,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=C 2024-11-07T14:17:57,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:57,452 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:57,452 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:57,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989137447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:57,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989137447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:57,453 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:57,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989137450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:57,453 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:57,453 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:57,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989137450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:57,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989137450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:57,467 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107f236d594be1c4ec78cc9c1c4ad6356c1_a97b90143d56ce006ffcb227cc121b11 is 50, key is test_row_0/A:col10/1730989077405/Put/seqid=0 2024-11-07T14:17:57,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741937_1113 (size=12154) 2024-11-07T14:17:57,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-07T14:17:57,544 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:57,544 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-07T14:17:57,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:57,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
as already flushing 2024-11-07T14:17:57,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:57,545 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:57,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:57,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:57,556 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:57,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989137555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:57,557 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:57,557 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:57,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989137555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:57,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989137555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:57,558 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:57,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989137556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:57,558 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:57,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989137556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:57,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-07T14:17:57,697 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:57,698 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-07T14:17:57,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:57,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:17:57,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:57,698 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:57,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:57,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:57,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:57,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989137760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:57,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:57,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989137760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:57,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:57,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989137760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:57,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:57,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989137761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:57,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:57,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989137761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:57,851 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:57,851 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-07T14:17:57,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:57,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:17:57,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:57,852 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:57,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:57,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:57,880 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:17:57,886 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107f236d594be1c4ec78cc9c1c4ad6356c1_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107f236d594be1c4ec78cc9c1c4ad6356c1_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:17:57,887 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/b4729696b1e24a6a9ed6287989dfd441, store: [table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:17:57,897 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/b4729696b1e24a6a9ed6287989dfd441 is 175, key is test_row_0/A:col10/1730989077405/Put/seqid=0 2024-11-07T14:17:57,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741938_1114 (size=30955) 2024-11-07T14:17:57,935 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/b4729696b1e24a6a9ed6287989dfd441 2024-11-07T14:17:57,982 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/a84a6a641b584866ae4945bb123c29cd is 50, key is test_row_0/B:col10/1730989077405/Put/seqid=0 2024-11-07T14:17:57,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741939_1115 (size=12001) 2024-11-07T14:17:57,991 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/a84a6a641b584866ae4945bb123c29cd 2024-11-07T14:17:57,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-07T14:17:58,005 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:58,005 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-07T14:17:58,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:58,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:17:58,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:58,006 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:17:58,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:58,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:58,036 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/7e763f5e272c445a98de282bf10d07fe is 50, key is test_row_0/C:col10/1730989077405/Put/seqid=0 2024-11-07T14:17:58,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741940_1116 (size=12001) 2024-11-07T14:17:58,066 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:58,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989138065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:58,068 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:58,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989138068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:58,070 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:58,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989138068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:58,074 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:58,074 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:58,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989138071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:58,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989138071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:58,158 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:58,159 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-07T14:17:58,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:58,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:17:58,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:58,159 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:58,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:58,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:58,311 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:58,312 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-07T14:17:58,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:58,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:17:58,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:58,312 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:58,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:58,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:17:58,461 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/7e763f5e272c445a98de282bf10d07fe 2024-11-07T14:17:58,464 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:58,465 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-07T14:17:58,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:58,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:17:58,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:58,466 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:58,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:58,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:17:58,470 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/b4729696b1e24a6a9ed6287989dfd441 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/b4729696b1e24a6a9ed6287989dfd441 2024-11-07T14:17:58,477 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/b4729696b1e24a6a9ed6287989dfd441, entries=150, sequenceid=17, filesize=30.2 K 2024-11-07T14:17:58,478 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/a84a6a641b584866ae4945bb123c29cd as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/a84a6a641b584866ae4945bb123c29cd 2024-11-07T14:17:58,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-07T14:17:58,505 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/a84a6a641b584866ae4945bb123c29cd, entries=150, sequenceid=17, filesize=11.7 K 2024-11-07T14:17:58,506 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/7e763f5e272c445a98de282bf10d07fe as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/7e763f5e272c445a98de282bf10d07fe 2024-11-07T14:17:58,515 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/7e763f5e272c445a98de282bf10d07fe, entries=150, sequenceid=17, filesize=11.7 K 2024-11-07T14:17:58,517 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for a97b90143d56ce006ffcb227cc121b11 in 1109ms, sequenceid=17, compaction requested=false 2024-11-07T14:17:58,517 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:17:58,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:17:58,575 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a97b90143d56ce006ffcb227cc121b11 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-07T14:17:58,577 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=A 2024-11-07T14:17:58,577 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:58,577 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=B 2024-11-07T14:17:58,577 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:58,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=C 2024-11-07T14:17:58,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:58,593 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110796f0be9ef2914e23a81f0dbebb1b282d_a97b90143d56ce006ffcb227cc121b11 is 50, key is test_row_0/A:col10/1730989077447/Put/seqid=0 2024-11-07T14:17:58,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:58,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989138586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:58,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:58,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989138590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:58,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:58,596 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:58,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989138592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:58,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989138594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:58,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:58,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989138595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:58,619 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:58,620 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-07T14:17:58,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:58,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:17:58,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:58,620 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:17:58,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:58,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:58,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741941_1117 (size=17034) 2024-11-07T14:17:58,651 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:17:58,660 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110796f0be9ef2914e23a81f0dbebb1b282d_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110796f0be9ef2914e23a81f0dbebb1b282d_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:17:58,663 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/918fd67798a34102b3cc3b60a646bd73, store: [table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:17:58,664 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/918fd67798a34102b3cc3b60a646bd73 is 175, key is test_row_0/A:col10/1730989077447/Put/seqid=0 2024-11-07T14:17:58,697 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:58,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989138697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:58,698 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:58,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741942_1118 (size=48139) 2024-11-07T14:17:58,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989138698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:58,701 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:58,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989138698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:58,702 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:58,702 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=42, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/918fd67798a34102b3cc3b60a646bd73 2024-11-07T14:17:58,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989138700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:58,703 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:58,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989138701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:58,718 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/3a1e6b8bcce445328d5067dcb1276524 is 50, key is test_row_0/B:col10/1730989077447/Put/seqid=0 2024-11-07T14:17:58,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741943_1119 (size=12001) 2024-11-07T14:17:58,744 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/3a1e6b8bcce445328d5067dcb1276524 2024-11-07T14:17:58,758 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/e587b520efa24be6acefce6f33d6f1df is 50, key is test_row_0/C:col10/1730989077447/Put/seqid=0 2024-11-07T14:17:58,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741944_1120 (size=12001) 2024-11-07T14:17:58,770 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/e587b520efa24be6acefce6f33d6f1df 2024-11-07T14:17:58,772 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:58,774 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-07T14:17:58,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
2024-11-07T14:17:58,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:17:58,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:58,774 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:58,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:17:58,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:17:58,780 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/918fd67798a34102b3cc3b60a646bd73 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/918fd67798a34102b3cc3b60a646bd73 2024-11-07T14:17:58,789 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/918fd67798a34102b3cc3b60a646bd73, entries=250, sequenceid=42, filesize=47.0 K 2024-11-07T14:17:58,790 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/3a1e6b8bcce445328d5067dcb1276524 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/3a1e6b8bcce445328d5067dcb1276524 2024-11-07T14:17:58,798 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/3a1e6b8bcce445328d5067dcb1276524, entries=150, sequenceid=42, filesize=11.7 K 2024-11-07T14:17:58,799 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/e587b520efa24be6acefce6f33d6f1df as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/e587b520efa24be6acefce6f33d6f1df 2024-11-07T14:17:58,804 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/e587b520efa24be6acefce6f33d6f1df, entries=150, sequenceid=42, filesize=11.7 K 2024-11-07T14:17:58,805 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for a97b90143d56ce006ffcb227cc121b11 in 230ms, sequenceid=42, compaction requested=false 2024-11-07T14:17:58,805 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:17:58,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:17:58,903 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a97b90143d56ce006ffcb227cc121b11 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-07T14:17:58,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=A 2024-11-07T14:17:58,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:58,906 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=B 2024-11-07T14:17:58,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:58,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=C 2024-11-07T14:17:58,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:58,926 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:58,928 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-07T14:17:58,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:58,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:17:58,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:58,929 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:58,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:58,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:17:58,941 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110724faf90fe12d4a7dbb5aaa07d63e86de_a97b90143d56ce006ffcb227cc121b11 is 50, key is test_row_0/A:col10/1730989078901/Put/seqid=0 2024-11-07T14:17:58,954 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:58,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989138949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:58,954 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:58,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989138950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:58,954 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:58,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989138950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:58,955 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:58,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989138951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:58,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741945_1121 (size=14594) 2024-11-07T14:17:58,958 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:58,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989138954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:58,959 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:17:58,964 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110724faf90fe12d4a7dbb5aaa07d63e86de_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110724faf90fe12d4a7dbb5aaa07d63e86de_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:17:58,966 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/a4edbfffbc654f2b8db5b490e8cd5d27, store: [table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:17:58,967 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/a4edbfffbc654f2b8db5b490e8cd5d27 is 175, key is test_row_0/A:col10/1730989078901/Put/seqid=0 2024-11-07T14:17:58,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741946_1122 (size=39549) 2024-11-07T14:17:58,976 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=55, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/a4edbfffbc654f2b8db5b490e8cd5d27 2024-11-07T14:17:58,988 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/d4680b443a03492c9d561f463dad31af is 50, key is test_row_0/B:col10/1730989078901/Put/seqid=0 2024-11-07T14:17:59,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741947_1123 
(size=12001) 2024-11-07T14:17:59,058 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:59,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989139056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:59,058 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:59,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989139056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:59,059 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:59,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989139056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:59,059 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:59,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989139056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:59,060 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:59,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989139059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:59,083 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:59,084 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-07T14:17:59,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:59,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:17:59,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:59,084 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
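The repeated RegionTooBusyException entries above show the region server rejecting Mutate calls while the region's memstore sits above its blocking threshold (512.0 K in this test run; in a stock deployment the threshold is roughly hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier), and the concurrent FlushRegionCallable failures are simply the remote flush procedure finding the region already flushing. A minimal client-side sketch of how such rejections are typically absorbed follows; the table, row, and column names mirror the test, the class name is illustrative, and the explicit retry loop is for clarity only, since the standard HBase client normally retries this exception under its own backoff settings (and may surface it wrapped in a retries-exhausted exception rather than directly).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          // The server may reject this while the memstore is over its blocking limit.
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) {
            throw e;                 // give up after a few attempts
          }
          Thread.sleep(backoffMs);   // back off so the pending flush can drain the memstore
          backoffMs *= 2;
        }
      }
    }
  }
}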
2024-11-07T14:17:59,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:59,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:59,146 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-07T14:17:59,237 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:59,239 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-07T14:17:59,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:59,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:17:59,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:59,239 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:59,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:59,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:59,263 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:59,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989139261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:59,263 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:59,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989139261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:59,264 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:59,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:59,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989139262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:59,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989139262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:59,267 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:59,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989139265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:59,391 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:59,392 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-07T14:17:59,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:59,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:17:59,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:59,393 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:17:59,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:59,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:17:59,411 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/d4680b443a03492c9d561f463dad31af 2024-11-07T14:17:59,421 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/b77db89c788247369d3dfca2c92a695b is 50, key is test_row_0/C:col10/1730989078901/Put/seqid=0 2024-11-07T14:17:59,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741948_1124 (size=12001) 2024-11-07T14:17:59,440 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/b77db89c788247369d3dfca2c92a695b 2024-11-07T14:17:59,449 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/a4edbfffbc654f2b8db5b490e8cd5d27 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/a4edbfffbc654f2b8db5b490e8cd5d27 2024-11-07T14:17:59,455 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/a4edbfffbc654f2b8db5b490e8cd5d27, entries=200, sequenceid=55, filesize=38.6 K 2024-11-07T14:17:59,457 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/d4680b443a03492c9d561f463dad31af as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/d4680b443a03492c9d561f463dad31af 2024-11-07T14:17:59,469 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/d4680b443a03492c9d561f463dad31af, entries=150, sequenceid=55, filesize=11.7 K 2024-11-07T14:17:59,471 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/b77db89c788247369d3dfca2c92a695b as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/b77db89c788247369d3dfca2c92a695b 
2024-11-07T14:17:59,484 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/b77db89c788247369d3dfca2c92a695b, entries=150, sequenceid=55, filesize=11.7 K 2024-11-07T14:17:59,485 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for a97b90143d56ce006ffcb227cc121b11 in 582ms, sequenceid=55, compaction requested=true 2024-11-07T14:17:59,485 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:17:59,485 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a97b90143d56ce006ffcb227cc121b11:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:17:59,485 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:59,485 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:17:59,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a97b90143d56ce006ffcb227cc121b11:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:17:59,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:17:59,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a97b90143d56ce006ffcb227cc121b11:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:17:59,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:17:59,486 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:17:59,487 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 118643 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:17:59,487 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:17:59,487 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): a97b90143d56ce006ffcb227cc121b11/B is initiating minor compaction (all files) 2024-11-07T14:17:59,487 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): a97b90143d56ce006ffcb227cc121b11/A is initiating minor compaction (all files) 2024-11-07T14:17:59,487 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a97b90143d56ce006ffcb227cc121b11/B in TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
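At this point the flusher has queued compactions for stores A, B and C, and ExploringCompactionPolicy has selected all three eligible flushed files per store for a minor compaction. These compactions were requested automatically by MemStoreFlusher; for comparison, a minimal sketch of asking for one explicitly through the Admin API is shown below (HBase 2.x Java client assumed, class name illustrative; the server-side policy still decides which files are actually merged).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompactionExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Queue a minor compaction for one column family; file selection stays with
      // the configured compaction policy on the region server.
      admin.compact(table, Bytes.toBytes("A"));
      // A major compaction instead rewrites every store file into one:
      // admin.majorCompact(table, Bytes.toBytes("A"));
    }
  }
}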
2024-11-07T14:17:59,487 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a97b90143d56ce006ffcb227cc121b11/A in TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:59,487 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/a84a6a641b584866ae4945bb123c29cd, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/3a1e6b8bcce445328d5067dcb1276524, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/d4680b443a03492c9d561f463dad31af] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp, totalSize=35.2 K 2024-11-07T14:17:59,487 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/b4729696b1e24a6a9ed6287989dfd441, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/918fd67798a34102b3cc3b60a646bd73, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/a4edbfffbc654f2b8db5b490e8cd5d27] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp, totalSize=115.9 K 2024-11-07T14:17:59,487 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:59,487 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
files: [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/b4729696b1e24a6a9ed6287989dfd441, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/918fd67798a34102b3cc3b60a646bd73, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/a4edbfffbc654f2b8db5b490e8cd5d27] 2024-11-07T14:17:59,487 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting a84a6a641b584866ae4945bb123c29cd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1730989077405 2024-11-07T14:17:59,488 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting b4729696b1e24a6a9ed6287989dfd441, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1730989077405 2024-11-07T14:17:59,488 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 3a1e6b8bcce445328d5067dcb1276524, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1730989077447 2024-11-07T14:17:59,488 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 918fd67798a34102b3cc3b60a646bd73, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1730989077416 2024-11-07T14:17:59,489 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting d4680b443a03492c9d561f463dad31af, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1730989078587 2024-11-07T14:17:59,489 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting a4edbfffbc654f2b8db5b490e8cd5d27, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1730989078581 2024-11-07T14:17:59,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-07T14:17:59,502 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:17:59,502 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a97b90143d56ce006ffcb227cc121b11#B#compaction#104 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:17:59,503 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/e79b3572b5664850abfb5438787be9a4 is 50, key is test_row_0/B:col10/1730989078901/Put/seqid=0 2024-11-07T14:17:59,508 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024110737a849fe945d406f9485a500f9205b83_a97b90143d56ce006ffcb227cc121b11 store=[table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:17:59,513 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024110737a849fe945d406f9485a500f9205b83_a97b90143d56ce006ffcb227cc121b11, store=[table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:17:59,514 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110737a849fe945d406f9485a500f9205b83_a97b90143d56ce006ffcb227cc121b11 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:17:59,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741949_1125 (size=12104) 2024-11-07T14:17:59,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741950_1126 (size=4469) 2024-11-07T14:17:59,544 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a97b90143d56ce006ffcb227cc121b11#A#compaction#105 average throughput is 0.58 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:17:59,545 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:17:59,546 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-07T14:17:59,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
2024-11-07T14:17:59,546 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2837): Flushing a97b90143d56ce006ffcb227cc121b11 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-07T14:17:59,547 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/2afa080b5c3846f6885f5b92321d0b86 is 175, key is test_row_0/A:col10/1730989078901/Put/seqid=0 2024-11-07T14:17:59,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=A 2024-11-07T14:17:59,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:59,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=B 2024-11-07T14:17:59,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:59,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=C 2024-11-07T14:17:59,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:59,551 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/e79b3572b5664850abfb5438787be9a4 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/e79b3572b5664850abfb5438787be9a4 2024-11-07T14:17:59,560 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a97b90143d56ce006ffcb227cc121b11/B of a97b90143d56ce006ffcb227cc121b11 into e79b3572b5664850abfb5438787be9a4(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
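The B store has now been compacted from three flushed files into a single 11.8 K file while another flush of the same region is starting. A minimal read-side sketch follows, showing how a client would check that the row stays readable across these file rewrites; the row, families and qualifier mirror the test schema, and the class name is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadBackExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Get get = new Get(Bytes.toBytes("test_row_0"));
      Result result = table.get(get);
      // Flushes and compactions rewrite store files underneath the region, but a
      // read of the row should still return a value for each family in the schema.
      for (String family : new String[] {"A", "B", "C"}) {
        byte[] value = result.getValue(Bytes.toBytes(family), Bytes.toBytes("col10"));
        System.out.println(family + ":col10 -> "
            + (value == null ? "null" : Bytes.toStringBinary(value)));
      }
    }
  }
}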
2024-11-07T14:17:59,560 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:17:59,560 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11., storeName=a97b90143d56ce006ffcb227cc121b11/B, priority=13, startTime=1730989079486; duration=0sec 2024-11-07T14:17:59,560 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:17:59,560 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a97b90143d56ce006ffcb227cc121b11:B 2024-11-07T14:17:59,560 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:17:59,562 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:17:59,562 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): a97b90143d56ce006ffcb227cc121b11/C is initiating minor compaction (all files) 2024-11-07T14:17:59,562 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a97b90143d56ce006ffcb227cc121b11/C in TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:59,563 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/7e763f5e272c445a98de282bf10d07fe, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/e587b520efa24be6acefce6f33d6f1df, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/b77db89c788247369d3dfca2c92a695b] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp, totalSize=35.2 K 2024-11-07T14:17:59,563 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 7e763f5e272c445a98de282bf10d07fe, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1730989077405 2024-11-07T14:17:59,563 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting e587b520efa24be6acefce6f33d6f1df, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1730989077447 2024-11-07T14:17:59,564 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting b77db89c788247369d3dfca2c92a695b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1730989078587 2024-11-07T14:17:59,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is 
added to blk_1073741951_1127 (size=31058) 2024-11-07T14:17:59,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107b29143f742bb4682959ebd77f2a4a5a4_a97b90143d56ce006ffcb227cc121b11 is 50, key is test_row_0/A:col10/1730989078952/Put/seqid=0 2024-11-07T14:17:59,576 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/2afa080b5c3846f6885f5b92321d0b86 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/2afa080b5c3846f6885f5b92321d0b86 2024-11-07T14:17:59,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:17:59,578 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:17:59,587 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a97b90143d56ce006ffcb227cc121b11/A of a97b90143d56ce006ffcb227cc121b11 into 2afa080b5c3846f6885f5b92321d0b86(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:17:59,587 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:17:59,587 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11., storeName=a97b90143d56ce006ffcb227cc121b11/A, priority=13, startTime=1730989079485; duration=0sec 2024-11-07T14:17:59,587 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:59,587 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a97b90143d56ce006ffcb227cc121b11:A 2024-11-07T14:17:59,590 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a97b90143d56ce006ffcb227cc121b11#C#compaction#107 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:17:59,591 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/37d3f840407746548aced53860f1db69 is 50, key is test_row_0/C:col10/1730989078901/Put/seqid=0 2024-11-07T14:17:59,594 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:59,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989139588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:59,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:59,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989139591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:59,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741952_1128 (size=12154) 2024-11-07T14:17:59,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:17:59,600 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:59,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989139594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:59,600 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:59,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989139596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:59,601 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:59,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989139596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:59,604 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107b29143f742bb4682959ebd77f2a4a5a4_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107b29143f742bb4682959ebd77f2a4a5a4_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:17:59,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/82f91c3ffb4147d5962201b394e03f49, store: [table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:17:59,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/82f91c3ffb4147d5962201b394e03f49 is 175, key is test_row_0/A:col10/1730989078952/Put/seqid=0 2024-11-07T14:17:59,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741953_1129 (size=12104) 2024-11-07T14:17:59,624 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/37d3f840407746548aced53860f1db69 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/37d3f840407746548aced53860f1db69 2024-11-07T14:17:59,632 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a97b90143d56ce006ffcb227cc121b11/C of a97b90143d56ce006ffcb227cc121b11 into 37d3f840407746548aced53860f1db69(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:17:59,632 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:17:59,632 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11., storeName=a97b90143d56ce006ffcb227cc121b11/C, priority=13, startTime=1730989079486; duration=0sec 2024-11-07T14:17:59,632 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:17:59,632 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a97b90143d56ce006ffcb227cc121b11:C 2024-11-07T14:17:59,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741954_1130 (size=30955) 2024-11-07T14:17:59,634 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/82f91c3ffb4147d5962201b394e03f49 2024-11-07T14:17:59,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/c4f019cfd3454843ae131965ea75dead is 50, key is test_row_0/B:col10/1730989078952/Put/seqid=0 2024-11-07T14:17:59,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741955_1131 (size=12001) 2024-11-07T14:17:59,678 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/c4f019cfd3454843ae131965ea75dead 2024-11-07T14:17:59,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/1062487ac65b4c9d91c6625cdff4d3ed is 50, key is test_row_0/C:col10/1730989078952/Put/seqid=0 2024-11-07T14:17:59,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741956_1132 (size=12001) 2024-11-07T14:17:59,697 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/1062487ac65b4c9d91c6625cdff4d3ed 
2024-11-07T14:17:59,697 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:59,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989139696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:59,699 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:59,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989139697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:59,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:59,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989139702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:59,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:59,705 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:59,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989139702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:59,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989139702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:59,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/82f91c3ffb4147d5962201b394e03f49 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/82f91c3ffb4147d5962201b394e03f49 2024-11-07T14:17:59,715 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/82f91c3ffb4147d5962201b394e03f49, entries=150, sequenceid=78, filesize=30.2 K 2024-11-07T14:17:59,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/c4f019cfd3454843ae131965ea75dead as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/c4f019cfd3454843ae131965ea75dead 2024-11-07T14:17:59,729 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/c4f019cfd3454843ae131965ea75dead, entries=150, sequenceid=78, filesize=11.7 K 2024-11-07T14:17:59,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/1062487ac65b4c9d91c6625cdff4d3ed as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/1062487ac65b4c9d91c6625cdff4d3ed 2024-11-07T14:17:59,737 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/1062487ac65b4c9d91c6625cdff4d3ed, entries=150, sequenceid=78, filesize=11.7 K 2024-11-07T14:17:59,738 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=80.51 KB/82440 for a97b90143d56ce006ffcb227cc121b11 in 192ms, sequenceid=78, compaction requested=false 2024-11-07T14:17:59,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2538): Flush status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:17:59,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:17:59,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=44 2024-11-07T14:17:59,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=44 2024-11-07T14:17:59,742 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-11-07T14:17:59,742 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3480 sec 2024-11-07T14:17:59,744 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees in 2.3560 sec 2024-11-07T14:17:59,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:17:59,901 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a97b90143d56ce006ffcb227cc121b11 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-07T14:17:59,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=A 2024-11-07T14:17:59,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:59,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=B 2024-11-07T14:17:59,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:59,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
a97b90143d56ce006ffcb227cc121b11, store=C 2024-11-07T14:17:59,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:17:59,912 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411077f7573e80eaa495abf904cbc3c8c5b87_a97b90143d56ce006ffcb227cc121b11 is 50, key is test_row_0/A:col10/1730989079899/Put/seqid=0 2024-11-07T14:17:59,923 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:59,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989139919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:59,924 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:59,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989139920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:59,926 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:59,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989139922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:59,926 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:59,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989139923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:59,927 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:17:59,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989139923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:17:59,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741957_1133 (size=12154) 2024-11-07T14:17:59,935 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:17:59,941 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411077f7573e80eaa495abf904cbc3c8c5b87_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411077f7573e80eaa495abf904cbc3c8c5b87_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:17:59,943 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/2b258bd51ae54bf28373e660683a46ab, store: [table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:17:59,944 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/2b258bd51ae54bf28373e660683a46ab is 175, key is test_row_0/A:col10/1730989079899/Put/seqid=0 2024-11-07T14:17:59,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741958_1134 (size=30955) 2024-11-07T14:17:59,969 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=97, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/2b258bd51ae54bf28373e660683a46ab 2024-11-07T14:17:59,982 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/c037a8538adb488f9764fc97f809e86f is 50, key is 
test_row_0/B:col10/1730989079899/Put/seqid=0 2024-11-07T14:18:00,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741959_1135 (size=12001) 2024-11-07T14:18:00,003 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/c037a8538adb488f9764fc97f809e86f 2024-11-07T14:18:00,015 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/3453119d0a3c4c0f8f248f0ea80c0561 is 50, key is test_row_0/C:col10/1730989079899/Put/seqid=0 2024-11-07T14:18:00,030 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:00,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989140027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:00,030 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:00,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989140027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:00,031 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:00,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989140028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:00,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:00,031 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:00,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989140028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:00,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989140029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:00,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741960_1136 (size=12001) 2024-11-07T14:18:00,046 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/3453119d0a3c4c0f8f248f0ea80c0561 2024-11-07T14:18:00,053 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/2b258bd51ae54bf28373e660683a46ab as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/2b258bd51ae54bf28373e660683a46ab 2024-11-07T14:18:00,063 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/2b258bd51ae54bf28373e660683a46ab, entries=150, sequenceid=97, filesize=30.2 K 2024-11-07T14:18:00,065 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/c037a8538adb488f9764fc97f809e86f as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/c037a8538adb488f9764fc97f809e86f 2024-11-07T14:18:00,073 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/c037a8538adb488f9764fc97f809e86f, entries=150, sequenceid=97, filesize=11.7 K 2024-11-07T14:18:00,074 DEBUG 
[MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/3453119d0a3c4c0f8f248f0ea80c0561 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/3453119d0a3c4c0f8f248f0ea80c0561 2024-11-07T14:18:00,081 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/3453119d0a3c4c0f8f248f0ea80c0561, entries=150, sequenceid=97, filesize=11.7 K 2024-11-07T14:18:00,082 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for a97b90143d56ce006ffcb227cc121b11 in 181ms, sequenceid=97, compaction requested=true 2024-11-07T14:18:00,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:18:00,083 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:18:00,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a97b90143d56ce006ffcb227cc121b11:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:18:00,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:00,083 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:18:00,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a97b90143d56ce006ffcb227cc121b11:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:18:00,084 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92968 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:18:00,084 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): a97b90143d56ce006ffcb227cc121b11/A is initiating minor compaction (all files) 2024-11-07T14:18:00,084 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a97b90143d56ce006ffcb227cc121b11/A in TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
2024-11-07T14:18:00,085 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/2afa080b5c3846f6885f5b92321d0b86, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/82f91c3ffb4147d5962201b394e03f49, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/2b258bd51ae54bf28373e660683a46ab] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp, totalSize=90.8 K 2024-11-07T14:18:00,085 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:00,085 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. files: [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/2afa080b5c3846f6885f5b92321d0b86, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/82f91c3ffb4147d5962201b394e03f49, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/2b258bd51ae54bf28373e660683a46ab] 2024-11-07T14:18:00,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:00,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a97b90143d56ce006ffcb227cc121b11:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:18:00,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:18:00,085 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:18:00,086 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): a97b90143d56ce006ffcb227cc121b11/B is initiating minor compaction (all files) 2024-11-07T14:18:00,086 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a97b90143d56ce006ffcb227cc121b11/B in TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
2024-11-07T14:18:00,086 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/e79b3572b5664850abfb5438787be9a4, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/c4f019cfd3454843ae131965ea75dead, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/c037a8538adb488f9764fc97f809e86f] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp, totalSize=35.3 K 2024-11-07T14:18:00,086 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2afa080b5c3846f6885f5b92321d0b86, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1730989078587 2024-11-07T14:18:00,086 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting e79b3572b5664850abfb5438787be9a4, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1730989078587 2024-11-07T14:18:00,087 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting c4f019cfd3454843ae131965ea75dead, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1730989078948 2024-11-07T14:18:00,087 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 82f91c3ffb4147d5962201b394e03f49, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1730989078948 2024-11-07T14:18:00,088 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting c037a8538adb488f9764fc97f809e86f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1730989079590 2024-11-07T14:18:00,088 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2b258bd51ae54bf28373e660683a46ab, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1730989079590 2024-11-07T14:18:00,097 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:00,104 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a97b90143d56ce006ffcb227cc121b11#B#compaction#114 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:18:00,105 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/f4793cac7f9546acb82d2eefd8f93d81 is 50, key is test_row_0/B:col10/1730989079899/Put/seqid=0 2024-11-07T14:18:00,117 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411072fff873edcfc4dda857ef00414f96d8c_a97b90143d56ce006ffcb227cc121b11 store=[table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:00,118 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411072fff873edcfc4dda857ef00414f96d8c_a97b90143d56ce006ffcb227cc121b11, store=[table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:00,119 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411072fff873edcfc4dda857ef00414f96d8c_a97b90143d56ce006ffcb227cc121b11 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:00,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741962_1138 (size=4469) 2024-11-07T14:18:00,141 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a97b90143d56ce006ffcb227cc121b11#A#compaction#113 average throughput is 0.56 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:18:00,142 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/6b43dc06adc84b4085a0ae43ba3bae0e is 175, key is test_row_0/A:col10/1730989079899/Put/seqid=0 2024-11-07T14:18:00,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741961_1137 (size=12207) 2024-11-07T14:18:00,159 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/f4793cac7f9546acb82d2eefd8f93d81 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/f4793cac7f9546acb82d2eefd8f93d81 2024-11-07T14:18:00,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741963_1139 (size=31161) 2024-11-07T14:18:00,167 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a97b90143d56ce006ffcb227cc121b11/B of a97b90143d56ce006ffcb227cc121b11 into f4793cac7f9546acb82d2eefd8f93d81(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:18:00,167 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:18:00,167 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11., storeName=a97b90143d56ce006ffcb227cc121b11/B, priority=13, startTime=1730989080083; duration=0sec 2024-11-07T14:18:00,167 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:18:00,167 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a97b90143d56ce006ffcb227cc121b11:B 2024-11-07T14:18:00,167 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:18:00,168 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:18:00,169 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): a97b90143d56ce006ffcb227cc121b11/C is initiating minor compaction (all files) 2024-11-07T14:18:00,169 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a97b90143d56ce006ffcb227cc121b11/C in TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
2024-11-07T14:18:00,169 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/37d3f840407746548aced53860f1db69, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/1062487ac65b4c9d91c6625cdff4d3ed, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/3453119d0a3c4c0f8f248f0ea80c0561] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp, totalSize=35.3 K 2024-11-07T14:18:00,170 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 37d3f840407746548aced53860f1db69, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1730989078587 2024-11-07T14:18:00,170 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 1062487ac65b4c9d91c6625cdff4d3ed, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1730989078948 2024-11-07T14:18:00,171 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 3453119d0a3c4c0f8f248f0ea80c0561, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1730989079590 2024-11-07T14:18:00,173 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/6b43dc06adc84b4085a0ae43ba3bae0e as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/6b43dc06adc84b4085a0ae43ba3bae0e 2024-11-07T14:18:00,180 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a97b90143d56ce006ffcb227cc121b11/A of a97b90143d56ce006ffcb227cc121b11 into 6b43dc06adc84b4085a0ae43ba3bae0e(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:18:00,180 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:18:00,180 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11., storeName=a97b90143d56ce006ffcb227cc121b11/A, priority=13, startTime=1730989080083; duration=0sec 2024-11-07T14:18:00,180 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:00,180 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a97b90143d56ce006ffcb227cc121b11:A 2024-11-07T14:18:00,185 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a97b90143d56ce006ffcb227cc121b11#C#compaction#115 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:18:00,186 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/01bf186612c74e11a385026af0ffb450 is 50, key is test_row_0/C:col10/1730989079899/Put/seqid=0 2024-11-07T14:18:00,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741964_1140 (size=12207) 2024-11-07T14:18:00,207 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/01bf186612c74e11a385026af0ffb450 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/01bf186612c74e11a385026af0ffb450 2024-11-07T14:18:00,215 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a97b90143d56ce006ffcb227cc121b11/C of a97b90143d56ce006ffcb227cc121b11 into 01bf186612c74e11a385026af0ffb450(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:18:00,216 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:18:00,216 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11., storeName=a97b90143d56ce006ffcb227cc121b11/C, priority=13, startTime=1730989080085; duration=0sec 2024-11-07T14:18:00,217 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:00,217 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a97b90143d56ce006ffcb227cc121b11:C 2024-11-07T14:18:00,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:00,236 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a97b90143d56ce006ffcb227cc121b11 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-07T14:18:00,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=A 2024-11-07T14:18:00,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:00,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=B 2024-11-07T14:18:00,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:00,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=C 2024-11-07T14:18:00,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:00,249 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107d4880ccb5bcd4cf2a7b47c911aa7a11c_a97b90143d56ce006ffcb227cc121b11 is 50, key is test_row_0/A:col10/1730989079922/Put/seqid=0 2024-11-07T14:18:00,253 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:00,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989140248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:00,254 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:00,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989140252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:00,254 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:00,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989140253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:00,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:00,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989140253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:00,257 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:00,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989140256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:00,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741965_1141 (size=12154) 2024-11-07T14:18:00,357 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:00,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989140355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:00,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:00,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989140355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:00,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:00,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989140356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:00,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:00,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989140356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:00,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:00,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989140359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:00,560 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:00,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989140559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:00,562 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:00,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989140561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:00,562 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:00,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989140561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:00,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:00,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989140559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:00,565 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:00,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989140564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:00,673 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:00,678 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107d4880ccb5bcd4cf2a7b47c911aa7a11c_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107d4880ccb5bcd4cf2a7b47c911aa7a11c_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:00,679 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/b1888a3489f64d258697ca93a9fc36a9, store: [table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:00,680 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/b1888a3489f64d258697ca93a9fc36a9 is 175, key is test_row_0/A:col10/1730989079922/Put/seqid=0 2024-11-07T14:18:00,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741966_1142 (size=30955) 2024-11-07T14:18:00,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:00,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989140863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:00,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:00,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989140864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:00,865 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:00,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989140864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:00,865 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:00,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989140865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:00,869 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:00,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989140868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:01,088 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=123, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/b1888a3489f64d258697ca93a9fc36a9 2024-11-07T14:18:01,099 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/cd41cd2056f3468284ac77e25ee9e8b3 is 50, key is test_row_0/B:col10/1730989079922/Put/seqid=0 2024-11-07T14:18:01,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741967_1143 (size=12001) 2024-11-07T14:18:01,367 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:01,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989141367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:01,370 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:01,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989141368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:01,370 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:01,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989141368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:01,371 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:01,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989141370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:01,373 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:01,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989141372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:01,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-07T14:18:01,499 INFO [Thread-582 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 43 completed 2024-11-07T14:18:01,501 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:18:01,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees 2024-11-07T14:18:01,503 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:18:01,503 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:18:01,504 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:18:01,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-07T14:18:01,510 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=123 
(bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/cd41cd2056f3468284ac77e25ee9e8b3 2024-11-07T14:18:01,524 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/3c3e6583cd12416a9cc1a9c85eefb87a is 50, key is test_row_0/C:col10/1730989079922/Put/seqid=0 2024-11-07T14:18:01,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741968_1144 (size=12001) 2024-11-07T14:18:01,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-07T14:18:01,655 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:01,656 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-07T14:18:01,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:01,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:18:01,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:01,658 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:01,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:01,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:01,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-07T14:18:01,813 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:01,813 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-07T14:18:01,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:01,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:18:01,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:01,814 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:01,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:01,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:01,931 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/3c3e6583cd12416a9cc1a9c85eefb87a 2024-11-07T14:18:01,938 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/b1888a3489f64d258697ca93a9fc36a9 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/b1888a3489f64d258697ca93a9fc36a9 2024-11-07T14:18:01,944 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/b1888a3489f64d258697ca93a9fc36a9, entries=150, sequenceid=123, filesize=30.2 K 2024-11-07T14:18:01,945 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/cd41cd2056f3468284ac77e25ee9e8b3 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/cd41cd2056f3468284ac77e25ee9e8b3 2024-11-07T14:18:01,950 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/cd41cd2056f3468284ac77e25ee9e8b3, entries=150, sequenceid=123, filesize=11.7 K 2024-11-07T14:18:01,952 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/3c3e6583cd12416a9cc1a9c85eefb87a as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/3c3e6583cd12416a9cc1a9c85eefb87a 2024-11-07T14:18:01,958 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/3c3e6583cd12416a9cc1a9c85eefb87a, entries=150, sequenceid=123, filesize=11.7 K 2024-11-07T14:18:01,959 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for a97b90143d56ce006ffcb227cc121b11 in 1723ms, sequenceid=123, compaction requested=false 2024-11-07T14:18:01,959 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:18:01,966 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:01,967 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=46 2024-11-07T14:18:01,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:01,968 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2837): Flushing a97b90143d56ce006ffcb227cc121b11 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-07T14:18:01,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=A 2024-11-07T14:18:01,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:01,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=B 2024-11-07T14:18:01,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:01,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=C 2024-11-07T14:18:01,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:01,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411078bf82944f706417d80f8729b225895f4_a97b90143d56ce006ffcb227cc121b11 is 50, key is test_row_0/A:col10/1730989080248/Put/seqid=0 2024-11-07T14:18:02,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741969_1145 (size=12304) 2024-11-07T14:18:02,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:02,027 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411078bf82944f706417d80f8729b225895f4_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411078bf82944f706417d80f8729b225895f4_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:02,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/7ca76dd3a50b41dd8229f80a8a81d319, store: [table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:02,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/7ca76dd3a50b41dd8229f80a8a81d319 is 175, key is test_row_0/A:col10/1730989080248/Put/seqid=0 2024-11-07T14:18:02,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741970_1146 (size=31105) 2024-11-07T14:18:02,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-07T14:18:02,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:02,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:18:02,428 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:02,428 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:02,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989142424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:02,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989142424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:02,434 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:02,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989142427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:02,434 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:02,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989142428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:02,434 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:02,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989142428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:02,443 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=136, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/7ca76dd3a50b41dd8229f80a8a81d319 2024-11-07T14:18:02,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/2563e4b7263149c3a22fe8b0fe03fad4 is 50, key is test_row_0/B:col10/1730989080248/Put/seqid=0 2024-11-07T14:18:02,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741971_1147 (size=12151) 2024-11-07T14:18:02,531 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:02,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989142530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:02,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:02,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989142530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:02,537 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:02,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989142536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:02,537 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:02,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989142536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:02,537 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:02,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989142537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:02,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-07T14:18:02,733 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:02,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989142733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:02,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:02,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989142733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:02,741 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:02,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989142738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:02,741 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:02,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989142739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:02,742 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:02,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989142739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:02,873 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=136 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/2563e4b7263149c3a22fe8b0fe03fad4 2024-11-07T14:18:02,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/04abcf52db7345198ed2d9943844edd7 is 50, key is test_row_0/C:col10/1730989080248/Put/seqid=0 2024-11-07T14:18:02,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741972_1148 (size=12151) 2024-11-07T14:18:03,038 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:03,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989143037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:03,039 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:03,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989143037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:03,043 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:03,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989143043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:03,045 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:03,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989143043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:03,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:03,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989143045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:03,299 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=136 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/04abcf52db7345198ed2d9943844edd7 2024-11-07T14:18:03,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/7ca76dd3a50b41dd8229f80a8a81d319 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/7ca76dd3a50b41dd8229f80a8a81d319 2024-11-07T14:18:03,312 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/7ca76dd3a50b41dd8229f80a8a81d319, entries=150, sequenceid=136, filesize=30.4 K 2024-11-07T14:18:03,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/2563e4b7263149c3a22fe8b0fe03fad4 as 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/2563e4b7263149c3a22fe8b0fe03fad4 2024-11-07T14:18:03,320 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/2563e4b7263149c3a22fe8b0fe03fad4, entries=150, sequenceid=136, filesize=11.9 K 2024-11-07T14:18:03,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/04abcf52db7345198ed2d9943844edd7 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/04abcf52db7345198ed2d9943844edd7 2024-11-07T14:18:03,327 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/04abcf52db7345198ed2d9943844edd7, entries=150, sequenceid=136, filesize=11.9 K 2024-11-07T14:18:03,328 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=147.60 KB/151140 for a97b90143d56ce006ffcb227cc121b11 in 1361ms, sequenceid=136, compaction requested=true 2024-11-07T14:18:03,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2538): Flush status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:18:03,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
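(Editor's note, not part of the log: the entries above trace one full memstore flush of region a97b90143d56ce006ffcb227cc121b11 while concurrent Mutate calls are rejected with RegionTooBusyException "Over memstore limit=512.0 K". In HBase, HRegion.checkResources blocks updates once the region's memstore exceeds hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, so a 512 K limit suggests the test configures a deliberately small flush size, e.g. 128 KB with the default x4 multiplier; those exact values are an assumption, not something stated in this log. The sketch below only illustrates how such a limit would be configured, for example on a test cluster's Configuration.)

```java
// Illustrative sketch (assumed values, not taken from this log): configuring the
// memstore sizes that determine the "Over memstore limit" threshold seen above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitExample {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches 128 KB (the production default is 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Block new updates once the memstore reaches flush.size * multiplier,
        // i.e. 512 KB with these assumed values -- the limit reported by
        // RegionTooBusyException in the log entries above.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Blocking limit = " + blockingLimit + " bytes");
    }
}
```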
2024-11-07T14:18:03,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=46 2024-11-07T14:18:03,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=46 2024-11-07T14:18:03,331 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-11-07T14:18:03,331 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8260 sec 2024-11-07T14:18:03,333 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees in 1.8310 sec 2024-11-07T14:18:03,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:03,542 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a97b90143d56ce006ffcb227cc121b11 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-07T14:18:03,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=A 2024-11-07T14:18:03,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:03,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=B 2024-11-07T14:18:03,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:03,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=C 2024-11-07T14:18:03,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:03,551 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:03,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989143549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:03,552 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:03,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989143549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:03,554 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:03,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989143550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:03,554 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:03,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989143551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:03,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:03,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989143552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:03,557 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107dffab3a315e845b3a9802cc47e2c9ef6_a97b90143d56ce006ffcb227cc121b11 is 50, key is test_row_0/A:col10/1730989082426/Put/seqid=0 2024-11-07T14:18:03,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741973_1149 (size=12304) 2024-11-07T14:18:03,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-07T14:18:03,609 INFO [Thread-582 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 45 completed 2024-11-07T14:18:03,610 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:18:03,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-11-07T14:18:03,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-07T14:18:03,612 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:18:03,612 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=47, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:18:03,613 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:18:03,653 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:03,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989143652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:03,653 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:03,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989143653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:03,656 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:03,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989143655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:03,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:03,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989143655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:03,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:03,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989143656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:03,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-07T14:18:03,764 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:03,765 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-07T14:18:03,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:03,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:18:03,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:03,765 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
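(Editor's note, not part of the log: the entries around this point show a master-driven flush. A client issues a flush of TestAcidGuarantees via the Admin API, the master stores FlushTableProcedure pid=47 with a FlushRegionProcedure subprocedure pid=48, and the region server declines because the region is already flushing ("NOT flushing ... as already flushing"), which surfaces as the IOException above and is reported back to the master as a failed remote procedure in the entries that follow. The sketch below is a minimal, assumed client-side example of requesting such a table flush; connection details are placeholders.)

```java
// Illustrative sketch (assumptions noted above): requesting the kind of table
// flush that appears in this log as FlushTableProcedure pid=47.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Ask the master to flush every region of the table; in the log this
            // shows up as a FlushTableProcedure with one FlushRegionProcedure
            // subprocedure per region.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```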
2024-11-07T14:18:03,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:03,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:03,828 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-07T14:18:03,828 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-07T14:18:03,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:03,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989143854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:03,857 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:03,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989143855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:03,858 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:03,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989143857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:03,859 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:03,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989143859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:03,859 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:03,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989143859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:03,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-07T14:18:03,917 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:03,918 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-07T14:18:03,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:03,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:18:03,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:03,918 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:03,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:03,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:03,963 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:03,967 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107dffab3a315e845b3a9802cc47e2c9ef6_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107dffab3a315e845b3a9802cc47e2c9ef6_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:03,968 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/aad6d324472245a78a31f5662e9ab6c5, store: [table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:03,969 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/aad6d324472245a78a31f5662e9ab6c5 is 175, key is test_row_0/A:col10/1730989082426/Put/seqid=0 2024-11-07T14:18:03,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741974_1150 (size=31105) 2024-11-07T14:18:04,070 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:04,071 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-07T14:18:04,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:04,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:18:04,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:04,071 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:04,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:04,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:04,163 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:04,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989144160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:04,163 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:04,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989144160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:04,164 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:04,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989144160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:04,164 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:04,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989144161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:04,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:04,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989144161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:04,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-07T14:18:04,224 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:04,224 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-07T14:18:04,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:04,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:18:04,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:04,225 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:04,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:04,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:04,374 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=162, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/aad6d324472245a78a31f5662e9ab6c5 2024-11-07T14:18:04,377 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:04,377 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-07T14:18:04,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:04,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:18:04,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
2024-11-07T14:18:04,378 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:04,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:04,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:04,383 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/3193e0ad222a4bdcbe3812eac9284210 is 50, key is test_row_0/B:col10/1730989082426/Put/seqid=0 2024-11-07T14:18:04,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741975_1151 (size=12151) 2024-11-07T14:18:04,530 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:04,531 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-07T14:18:04,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:04,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:18:04,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
2024-11-07T14:18:04,531 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:04,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:04,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:04,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:04,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989144667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:04,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:04,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989144668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:04,669 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:04,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989144668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:04,670 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:04,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989144668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:04,671 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:04,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989144671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:04,683 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:04,684 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-07T14:18:04,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:04,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:18:04,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:04,684 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:04,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:04,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:04,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-07T14:18:04,788 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=162 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/3193e0ad222a4bdcbe3812eac9284210 2024-11-07T14:18:04,798 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/1e674eb4b7a840e3b215933c67600a5a is 50, key is test_row_0/C:col10/1730989082426/Put/seqid=0 2024-11-07T14:18:04,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741976_1152 (size=12151) 2024-11-07T14:18:04,836 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:04,837 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-07T14:18:04,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:04,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
as already flushing 2024-11-07T14:18:04,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:04,837 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:04,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:04,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:04,989 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:04,990 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-07T14:18:04,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:04,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:18:04,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:04,990 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:04,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:04,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:05,143 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:05,143 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-07T14:18:05,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:05,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:18:05,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:05,144 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:05,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:05,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:05,203 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=162 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/1e674eb4b7a840e3b215933c67600a5a 2024-11-07T14:18:05,208 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/aad6d324472245a78a31f5662e9ab6c5 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/aad6d324472245a78a31f5662e9ab6c5 2024-11-07T14:18:05,212 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/aad6d324472245a78a31f5662e9ab6c5, entries=150, sequenceid=162, filesize=30.4 K 2024-11-07T14:18:05,213 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/3193e0ad222a4bdcbe3812eac9284210 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/3193e0ad222a4bdcbe3812eac9284210 2024-11-07T14:18:05,218 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/3193e0ad222a4bdcbe3812eac9284210, entries=150, 
sequenceid=162, filesize=11.9 K 2024-11-07T14:18:05,220 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/1e674eb4b7a840e3b215933c67600a5a as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/1e674eb4b7a840e3b215933c67600a5a 2024-11-07T14:18:05,225 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/1e674eb4b7a840e3b215933c67600a5a, entries=150, sequenceid=162, filesize=11.9 K 2024-11-07T14:18:05,226 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for a97b90143d56ce006ffcb227cc121b11 in 1684ms, sequenceid=162, compaction requested=true 2024-11-07T14:18:05,226 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:18:05,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a97b90143d56ce006ffcb227cc121b11:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:18:05,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:05,226 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:18:05,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a97b90143d56ce006ffcb227cc121b11:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:18:05,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:05,226 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:18:05,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a97b90143d56ce006ffcb227cc121b11:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:18:05,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:18:05,228 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 124326 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:18:05,228 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): a97b90143d56ce006ffcb227cc121b11/A is initiating minor compaction (all files) 2024-11-07T14:18:05,228 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a97b90143d56ce006ffcb227cc121b11/A in 
TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:05,228 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/6b43dc06adc84b4085a0ae43ba3bae0e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/b1888a3489f64d258697ca93a9fc36a9, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/7ca76dd3a50b41dd8229f80a8a81d319, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/aad6d324472245a78a31f5662e9ab6c5] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp, totalSize=121.4 K 2024-11-07T14:18:05,228 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:05,228 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48510 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:18:05,228 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. files: [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/6b43dc06adc84b4085a0ae43ba3bae0e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/b1888a3489f64d258697ca93a9fc36a9, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/7ca76dd3a50b41dd8229f80a8a81d319, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/aad6d324472245a78a31f5662e9ab6c5] 2024-11-07T14:18:05,228 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): a97b90143d56ce006ffcb227cc121b11/B is initiating minor compaction (all files) 2024-11-07T14:18:05,228 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a97b90143d56ce006ffcb227cc121b11/B in TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
2024-11-07T14:18:05,228 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/f4793cac7f9546acb82d2eefd8f93d81, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/cd41cd2056f3468284ac77e25ee9e8b3, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/2563e4b7263149c3a22fe8b0fe03fad4, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/3193e0ad222a4bdcbe3812eac9284210] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp, totalSize=47.4 K 2024-11-07T14:18:05,229 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting f4793cac7f9546acb82d2eefd8f93d81, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1730989079590 2024-11-07T14:18:05,229 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6b43dc06adc84b4085a0ae43ba3bae0e, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1730989079590 2024-11-07T14:18:05,229 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting cd41cd2056f3468284ac77e25ee9e8b3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1730989079922 2024-11-07T14:18:05,229 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting b1888a3489f64d258697ca93a9fc36a9, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1730989079922 2024-11-07T14:18:05,230 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 2563e4b7263149c3a22fe8b0fe03fad4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1730989080242 2024-11-07T14:18:05,230 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7ca76dd3a50b41dd8229f80a8a81d319, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1730989080242 2024-11-07T14:18:05,230 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 3193e0ad222a4bdcbe3812eac9284210, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1730989082426 2024-11-07T14:18:05,230 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting aad6d324472245a78a31f5662e9ab6c5, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1730989082426 2024-11-07T14:18:05,240 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a97b90143d56ce006ffcb227cc121b11#B#compaction#125 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:18:05,241 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:05,241 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/3b08ada48672414eae6989d046ead0db is 50, key is test_row_0/B:col10/1730989082426/Put/seqid=0 2024-11-07T14:18:05,244 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107119772b1a79643ddba7d6442ad30d965_a97b90143d56ce006ffcb227cc121b11 store=[table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:05,248 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107119772b1a79643ddba7d6442ad30d965_a97b90143d56ce006ffcb227cc121b11, store=[table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:05,248 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107119772b1a79643ddba7d6442ad30d965_a97b90143d56ce006ffcb227cc121b11 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:05,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741977_1153 (size=12493) 2024-11-07T14:18:05,259 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/3b08ada48672414eae6989d046ead0db as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/3b08ada48672414eae6989d046ead0db 2024-11-07T14:18:05,264 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a97b90143d56ce006ffcb227cc121b11/B of a97b90143d56ce006ffcb227cc121b11 into 3b08ada48672414eae6989d046ead0db(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:18:05,264 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:18:05,264 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11., storeName=a97b90143d56ce006ffcb227cc121b11/B, priority=12, startTime=1730989085226; duration=0sec 2024-11-07T14:18:05,265 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:18:05,265 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a97b90143d56ce006ffcb227cc121b11:B 2024-11-07T14:18:05,265 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:18:05,266 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48510 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:18:05,266 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): a97b90143d56ce006ffcb227cc121b11/C is initiating minor compaction (all files) 2024-11-07T14:18:05,266 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a97b90143d56ce006ffcb227cc121b11/C in TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:05,267 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/01bf186612c74e11a385026af0ffb450, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/3c3e6583cd12416a9cc1a9c85eefb87a, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/04abcf52db7345198ed2d9943844edd7, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/1e674eb4b7a840e3b215933c67600a5a] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp, totalSize=47.4 K 2024-11-07T14:18:05,267 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 01bf186612c74e11a385026af0ffb450, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1730989079590 2024-11-07T14:18:05,267 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 3c3e6583cd12416a9cc1a9c85eefb87a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1730989079922 2024-11-07T14:18:05,268 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 04abcf52db7345198ed2d9943844edd7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=136, earliestPutTs=1730989080242 2024-11-07T14:18:05,268 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e674eb4b7a840e3b215933c67600a5a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1730989082426 2024-11-07T14:18:05,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741978_1154 (size=4469) 2024-11-07T14:18:05,280 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a97b90143d56ce006ffcb227cc121b11#C#compaction#127 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:18:05,280 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/327acd3581d14b6385fd5aa542350ff1 is 50, key is test_row_0/C:col10/1730989082426/Put/seqid=0 2024-11-07T14:18:05,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741979_1155 (size=12493) 2024-11-07T14:18:05,295 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:05,295 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-07T14:18:05,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
2024-11-07T14:18:05,296 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing a97b90143d56ce006ffcb227cc121b11 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-07T14:18:05,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=A 2024-11-07T14:18:05,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:05,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=B 2024-11-07T14:18:05,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:05,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=C 2024-11-07T14:18:05,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:05,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107f746306dd2604ab0be33b1ed56842751_a97b90143d56ce006ffcb227cc121b11 is 50, key is test_row_0/A:col10/1730989083550/Put/seqid=0 2024-11-07T14:18:05,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741980_1156 (size=12304) 2024-11-07T14:18:05,672 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:18:05,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:05,676 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a97b90143d56ce006ffcb227cc121b11#A#compaction#126 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:18:05,677 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/e9c21dfa420a415fa8d85a77aa53002a is 175, key is test_row_0/A:col10/1730989082426/Put/seqid=0 2024-11-07T14:18:05,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741981_1157 (size=31447) 2024-11-07T14:18:05,698 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/327acd3581d14b6385fd5aa542350ff1 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/327acd3581d14b6385fd5aa542350ff1 2024-11-07T14:18:05,700 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:05,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989145696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:05,700 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:05,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989145696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:05,701 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:05,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989145697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:05,701 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:05,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989145700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:05,703 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:05,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989145702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:05,704 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a97b90143d56ce006ffcb227cc121b11/C of a97b90143d56ce006ffcb227cc121b11 into 327acd3581d14b6385fd5aa542350ff1(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:18:05,704 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:18:05,704 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11., storeName=a97b90143d56ce006ffcb227cc121b11/C, priority=12, startTime=1730989085226; duration=0sec 2024-11-07T14:18:05,704 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:05,704 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a97b90143d56ce006ffcb227cc121b11:C 2024-11-07T14:18:05,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:05,714 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107f746306dd2604ab0be33b1ed56842751_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107f746306dd2604ab0be33b1ed56842751_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:05,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/f6fd37c8cbdd4791bb09143adee444a0, store: [table=TestAcidGuarantees family=A 
region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:05,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-07T14:18:05,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/f6fd37c8cbdd4791bb09143adee444a0 is 175, key is test_row_0/A:col10/1730989083550/Put/seqid=0 2024-11-07T14:18:05,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741982_1158 (size=31105) 2024-11-07T14:18:05,803 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:05,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989145802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:05,803 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:05,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989145802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:05,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:05,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989145802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:05,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:05,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989145802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:05,806 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:05,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989145805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:06,007 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:06,007 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:06,007 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:06,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989146005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:06,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989146005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:06,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989146005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:06,008 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:06,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989146006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:06,008 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:06,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989146008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:06,089 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/e9c21dfa420a415fa8d85a77aa53002a as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/e9c21dfa420a415fa8d85a77aa53002a 2024-11-07T14:18:06,094 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a97b90143d56ce006ffcb227cc121b11/A of a97b90143d56ce006ffcb227cc121b11 into e9c21dfa420a415fa8d85a77aa53002a(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
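Editor's note on the surrounding records: the repeated WARN/DEBUG pairs here all log the same condition, HRegion.checkResources rejecting Mutate calls with RegionTooBusyException because the region's memstore is above its 512.0 K blocking limit while the flush (pid=48) and the A/C compactions are still draining it. As a hedged illustration only, the Java sketch below shows the kind of bounded back-off a writer behind those rejected callIds might apply; the class name, row and value contents, retry bounds, and the assumption that the exception reaches the caller at all (rather than being absorbed by the HBase client's internal retries) are hypothetical and not taken from this log.

// Hypothetical sketch: bounded retry around a single put, assuming
// RegionTooBusyException propagates to the caller. Not part of TestAcidGuarantees.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;            // initial pause between attempts (arbitrary)
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);              // may be rejected while the memstore is over its limit
          break;
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs);     // give the in-flight flush time to drain the memstore
          backoffMs *= 2;              // simple exponential back-off, bounded by the loop
        }
      }
    }
  }
}

In practice the stock client already retries this exception internally; the sketch only makes explicit the back-off behaviour the repeated deadline/exception records above imply.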
2024-11-07T14:18:06,095 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:18:06,095 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11., storeName=a97b90143d56ce006ffcb227cc121b11/A, priority=12, startTime=1730989085226; duration=0sec 2024-11-07T14:18:06,095 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:06,095 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a97b90143d56ce006ffcb227cc121b11:A 2024-11-07T14:18:06,122 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=174, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/f6fd37c8cbdd4791bb09143adee444a0 2024-11-07T14:18:06,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/1deaa270837646a0a780ecb13dfe4faa is 50, key is test_row_0/B:col10/1730989083550/Put/seqid=0 2024-11-07T14:18:06,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741983_1159 (size=12151) 2024-11-07T14:18:06,310 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:06,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989146309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:06,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:06,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989146310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:06,312 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:06,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:06,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989146310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:06,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989146311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:06,313 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:06,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989146311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:06,541 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/1deaa270837646a0a780ecb13dfe4faa 2024-11-07T14:18:06,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/116853124a9240f99f04114b0b487469 is 50, key is test_row_0/C:col10/1730989083550/Put/seqid=0 2024-11-07T14:18:06,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741984_1160 (size=12151) 2024-11-07T14:18:06,563 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/116853124a9240f99f04114b0b487469 2024-11-07T14:18:06,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/f6fd37c8cbdd4791bb09143adee444a0 as 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/f6fd37c8cbdd4791bb09143adee444a0 2024-11-07T14:18:06,577 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/f6fd37c8cbdd4791bb09143adee444a0, entries=150, sequenceid=174, filesize=30.4 K 2024-11-07T14:18:06,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/1deaa270837646a0a780ecb13dfe4faa as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/1deaa270837646a0a780ecb13dfe4faa 2024-11-07T14:18:06,584 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/1deaa270837646a0a780ecb13dfe4faa, entries=150, sequenceid=174, filesize=11.9 K 2024-11-07T14:18:06,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/116853124a9240f99f04114b0b487469 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/116853124a9240f99f04114b0b487469 2024-11-07T14:18:06,593 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/116853124a9240f99f04114b0b487469, entries=150, sequenceid=174, filesize=11.9 K 2024-11-07T14:18:06,604 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for a97b90143d56ce006ffcb227cc121b11 in 1308ms, sequenceid=174, compaction requested=false 2024-11-07T14:18:06,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:18:06,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
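Editor's note: the records that follow close out pid=48 and its parent pid=47, the FlushRegionProcedure/FlushTableProcedure pair whose completion the master has been polling ("Checking to see if procedure is done pid=47"). For orientation, a minimal sketch of how such a table flush can be requested through the Admin API is given below; the connection setup and table name mirror this test, but the snippet itself is illustrative and is not code from TestAcidGuarantees.

// Illustrative only: request a flush of all regions of a table via the Admin API.
// In recent HBase versions this is carried out on the master as a FlushTableProcedure
// with per-region FlushRegionProcedure children, matching the pid=47/pid=48 records here.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the cluster to flush every region of the table; exact blocking and
      // procedure mechanics depend on the HBase version in use.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}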
2024-11-07T14:18:06,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-11-07T14:18:06,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-11-07T14:18:06,607 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-11-07T14:18:06,607 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.9930 sec 2024-11-07T14:18:06,608 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 2.9970 sec 2024-11-07T14:18:06,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:06,847 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a97b90143d56ce006ffcb227cc121b11 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-07T14:18:06,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=A 2024-11-07T14:18:06,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:06,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=B 2024-11-07T14:18:06,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:06,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=C 2024-11-07T14:18:06,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:06,852 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:06,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989146848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:06,852 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:06,853 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:06,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989146848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:06,855 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:06,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:06,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989146851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:06,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989146852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:06,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989146848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:06,865 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110763fc0b12dbec4013aa5068e5934ce483_a97b90143d56ce006ffcb227cc121b11 is 50, key is test_row_0/A:col10/1730989086813/Put/seqid=0 2024-11-07T14:18:06,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741985_1161 (size=14794) 2024-11-07T14:18:06,884 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:06,891 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110763fc0b12dbec4013aa5068e5934ce483_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110763fc0b12dbec4013aa5068e5934ce483_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:06,893 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/da88539f76b34b56b1a08dc53c64d35d, store: [table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:06,894 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/da88539f76b34b56b1a08dc53c64d35d is 175, key is test_row_0/A:col10/1730989086813/Put/seqid=0 2024-11-07T14:18:06,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741986_1162 (size=39749) 2024-11-07T14:18:06,954 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:06,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989146954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:06,955 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:06,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989146955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:06,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:06,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989146956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:06,960 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:06,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989146957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:06,960 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:06,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989146960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:07,159 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:07,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989147157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:07,159 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:07,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989147158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:07,163 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:07,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989147162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:07,163 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:07,164 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:07,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989147162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:07,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989147162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:07,312 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=207, memsize=62.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/da88539f76b34b56b1a08dc53c64d35d 2024-11-07T14:18:07,322 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/c2d337a4a2b4493db2da3b44f8c057c6 is 50, key is test_row_0/B:col10/1730989086813/Put/seqid=0 2024-11-07T14:18:07,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741987_1163 (size=12151) 2024-11-07T14:18:07,462 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:07,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989147462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:07,464 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:07,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989147464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:07,466 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:07,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989147465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:07,467 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:07,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989147466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:07,467 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:07,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989147466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:07,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-07T14:18:07,717 INFO [Thread-582 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed 2024-11-07T14:18:07,718 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:18:07,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-11-07T14:18:07,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-07T14:18:07,720 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:18:07,721 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:18:07,721 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:18:07,727 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=207 
(bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/c2d337a4a2b4493db2da3b44f8c057c6 2024-11-07T14:18:07,736 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/455e92903e0a4db681683c0176917b0b is 50, key is test_row_0/C:col10/1730989086813/Put/seqid=0 2024-11-07T14:18:07,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741988_1164 (size=12151) 2024-11-07T14:18:07,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-07T14:18:07,873 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:07,873 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-07T14:18:07,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:07,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:18:07,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:07,874 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:07,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:07,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:07,966 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:07,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989147965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:07,966 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:07,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989147965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:07,969 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:07,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989147968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:07,971 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:07,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989147970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:07,972 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:07,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989147972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:08,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-07T14:18:08,026 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:08,026 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-07T14:18:08,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:08,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:18:08,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:08,026 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:08,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:08,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:08,142 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/455e92903e0a4db681683c0176917b0b 2024-11-07T14:18:08,148 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/da88539f76b34b56b1a08dc53c64d35d as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/da88539f76b34b56b1a08dc53c64d35d 2024-11-07T14:18:08,153 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/da88539f76b34b56b1a08dc53c64d35d, entries=200, sequenceid=207, filesize=38.8 K 2024-11-07T14:18:08,154 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/c2d337a4a2b4493db2da3b44f8c057c6 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/c2d337a4a2b4493db2da3b44f8c057c6 2024-11-07T14:18:08,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,158 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/c2d337a4a2b4493db2da3b44f8c057c6, entries=150, sequenceid=207, filesize=11.9 K 2024-11-07T14:18:08,159 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/455e92903e0a4db681683c0176917b0b as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/455e92903e0a4db681683c0176917b0b 2024-11-07T14:18:08,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,164 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/455e92903e0a4db681683c0176917b0b, entries=150, sequenceid=207, filesize=11.9 K 2024-11-07T14:18:08,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,165 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~187.85 KB/192360, heapSize ~492.89 KB/504720, currentSize=13.42 KB/13740 for a97b90143d56ce006ffcb227cc121b11 in 1318ms, sequenceid=207, compaction requested=true 2024-11-07T14:18:08,165 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:18:08,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a97b90143d56ce006ffcb227cc121b11:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:18:08,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:08,167 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:18:08,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a97b90143d56ce006ffcb227cc121b11:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:18:08,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:08,167 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:18:08,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a97b90143d56ce006ffcb227cc121b11:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:18:08,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:18:08,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,169 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102301 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:18:08,169 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): a97b90143d56ce006ffcb227cc121b11/A is initiating minor compaction (all files) 2024-11-07T14:18:08,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,169 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a97b90143d56ce006ffcb227cc121b11/A in TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:08,169 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/e9c21dfa420a415fa8d85a77aa53002a, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/f6fd37c8cbdd4791bb09143adee444a0, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/da88539f76b34b56b1a08dc53c64d35d] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp, totalSize=99.9 K 2024-11-07T14:18:08,169 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:08,170 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
files: [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/e9c21dfa420a415fa8d85a77aa53002a, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/f6fd37c8cbdd4791bb09143adee444a0, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/da88539f76b34b56b1a08dc53c64d35d] 2024-11-07T14:18:08,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,170 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:18:08,170 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): a97b90143d56ce006ffcb227cc121b11/B is initiating minor compaction (all files) 2024-11-07T14:18:08,170 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting e9c21dfa420a415fa8d85a77aa53002a, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1730989082426 2024-11-07T14:18:08,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,171 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a97b90143d56ce006ffcb227cc121b11/B in TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
2024-11-07T14:18:08,171 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/3b08ada48672414eae6989d046ead0db, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/1deaa270837646a0a780ecb13dfe4faa, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/c2d337a4a2b4493db2da3b44f8c057c6] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp, totalSize=35.9 K 2024-11-07T14:18:08,171 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting f6fd37c8cbdd4791bb09143adee444a0, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1730989083543 2024-11-07T14:18:08,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,171 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 3b08ada48672414eae6989d046ead0db, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1730989082426 2024-11-07T14:18:08,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,172 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting da88539f76b34b56b1a08dc53c64d35d, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1730989086813 2024-11-07T14:18:08,172 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 1deaa270837646a0a780ecb13dfe4faa, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1730989083543 2024-11-07T14:18:08,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,176 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting c2d337a4a2b4493db2da3b44f8c057c6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1730989086813 2024-11-07T14:18:08,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,179 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:08,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,179 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-07T14:18:08,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
2024-11-07T14:18:08,180 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing a97b90143d56ce006ffcb227cc121b11 3/3 column families, dataSize=13.42 KB heapSize=35.91 KB 2024-11-07T14:18:08,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=A 2024-11-07T14:18:08,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:08,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=B 2024-11-07T14:18:08,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:08,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=C 2024-11-07T14:18:08,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:08,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,202 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:08,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,204 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a97b90143d56ce006ffcb227cc121b11#B#compaction#134 average throughput is 1.31 
MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:18:08,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,204 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/ea06fa46489e49e8a070bd7c95d5bf13 is 50, key is test_row_0/B:col10/1730989086813/Put/seqid=0 2024-11-07T14:18:08,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,205 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107403deb39717e462fb01d91cb4a61dabf_a97b90143d56ce006ffcb227cc121b11 store=[table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:08,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110710355fe471ca4f66a91415da88b2d99a_a97b90143d56ce006ffcb227cc121b11 is 50, key is test_row_0/A:col10/1730989086851/Put/seqid=0 2024-11-07T14:18:08,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,210 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107403deb39717e462fb01d91cb4a61dabf_a97b90143d56ce006ffcb227cc121b11, store=[table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:08,211 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107403deb39717e462fb01d91cb4a61dabf_a97b90143d56ce006ffcb227cc121b11 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:08,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741989_1165 (size=12595) 2024-11-07T14:18:08,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741991_1167 (size=4469) 2024-11-07T14:18:08,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741990_1166 (size=7324) 2024-11-07T14:18:08,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-07T14:18:08,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T14:18:08,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T14:18:08,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T14:18:08,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T14:18:08,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[identical StoreFileTrackerFactory DEBUG entries, emitted repeatedly by RpcServer handlers 0-2 between 14:18:08,575 and 14:18:08,653, omitted]
2024-11-07T14:18:08,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,644 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/ea06fa46489e49e8a070bd7c95d5bf13 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/ea06fa46489e49e8a070bd7c95d5bf13 2024-11-07T14:18:08,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,651 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a97b90143d56ce006ffcb227cc121b11/B of a97b90143d56ce006ffcb227cc121b11 into ea06fa46489e49e8a070bd7c95d5bf13(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-07T14:18:08,651 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a97b90143d56ce006ffcb227cc121b11:
2024-11-07T14:18:08,652 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11., storeName=a97b90143d56ce006ffcb227cc121b11/B, priority=13, startTime=1730989088167; duration=0sec
2024-11-07T14:18:08,652 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-07T14:18:08,652 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a97b90143d56ce006ffcb227cc121b11:B
2024-11-07T14:18:08,652 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-07T14:18:08,654 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-07T14:18:08,654 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): a97b90143d56ce006ffcb227cc121b11/C is initiating minor compaction (all files)
2024-11-07T14:18:08,654 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a97b90143d56ce006ffcb227cc121b11/C in TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.
2024-11-07T14:18:08,654 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/327acd3581d14b6385fd5aa542350ff1, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/116853124a9240f99f04114b0b487469, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/455e92903e0a4db681683c0176917b0b] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp, totalSize=35.9 K
2024-11-07T14:18:08,655 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 327acd3581d14b6385fd5aa542350ff1, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1730989082426
2024-11-07T14:18:08,656 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 116853124a9240f99f04114b0b487469, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1730989083543
2024-11-07T14:18:08,656 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 455e92903e0a4db681683c0176917b0b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1730989086813
2024-11-07T14:18:08,660 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a97b90143d56ce006ffcb227cc121b11#A#compaction#135 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-07T14:18:08,661 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/8c195013a54a4c35ba6b32d556f2f146 is 175, key is test_row_0/A:col10/1730989086813/Put/seqid=0
2024-11-07T14:18:08,676 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110710355fe471ca4f66a91415da88b2d99a_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110710355fe471ca4f66a91415da88b2d99a_a97b90143d56ce006ffcb227cc121b11
2024-11-07T14:18:08,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/545ab18391ca44b1ab4ca07e653fbfe9, store: [table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11]
2024-11-07T14:18:08,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/545ab18391ca44b1ab4ca07e653fbfe9 is 175, key is test_row_0/A:col10/1730989086851/Put/seqid=0
2024-11-07T14:18:08,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741992_1168 (size=31549)
2024-11-07T14:18:08,690 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a97b90143d56ce006ffcb227cc121b11#C#compaction#137 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-07T14:18:08,691 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/05606eb2b42c4fc194c07626ffe0a518 is 50, key is test_row_0/C:col10/1730989086813/Put/seqid=0
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741994_1170 (size=12595) 2024-11-07T14:18:08,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741993_1169 (size=13815) 2024-11-07T14:18:08,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,714 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=212, memsize=4.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/545ab18391ca44b1ab4ca07e653fbfe9 2024-11-07T14:18:08,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/bb9040b364774daf8f4b818c9577e678 is 50, key is test_row_0/B:col10/1730989086851/Put/seqid=0 2024-11-07T14:18:08,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741995_1171 (size=7365) 2024-11-07T14:18:08,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,740 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/bb9040b364774daf8f4b818c9577e678 2024-11-07T14:18:08,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/55c971ce7d6d4b5e81c521dbf32826d9 is 50, key is test_row_0/C:col10/1730989086851/Put/seqid=0 2024-11-07T14:18:08,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741996_1172 (size=7365) 2024-11-07T14:18:08,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,764 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/55c971ce7d6d4b5e81c521dbf32826d9 2024-11-07T14:18:08,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/545ab18391ca44b1ab4ca07e653fbfe9 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/545ab18391ca44b1ab4ca07e653fbfe9 2024-11-07T14:18:08,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
2024-11-07T14:18:08,778 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/545ab18391ca44b1ab4ca07e653fbfe9, entries=50, sequenceid=212, filesize=13.5 K
2024-11-07T14:18:08,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/bb9040b364774daf8f4b818c9577e678 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/bb9040b364774daf8f4b818c9577e678
2024-11-07T14:18:08,785 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/bb9040b364774daf8f4b818c9577e678, entries=50, sequenceid=212, filesize=7.2 K
2024-11-07T14:18:08,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/55c971ce7d6d4b5e81c521dbf32826d9 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/55c971ce7d6d4b5e81c521dbf32826d9
2024-11-07T14:18:08,792 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/55c971ce7d6d4b5e81c521dbf32826d9, entries=50, sequenceid=212, filesize=7.2 K
2024-11-07T14:18:08,793 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~13.42 KB/13740, heapSize ~35.86 KB/36720, currentSize=0 B/0 for a97b90143d56ce006ffcb227cc121b11 in 613ms, sequenceid=212, compaction requested=false
2024-11-07T14:18:08,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for a97b90143d56ce006ffcb227cc121b11:
2024-11-07T14:18:08,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.
2024-11-07T14:18:08,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50
2024-11-07T14:18:08,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=50
2024-11-07T14:18:08,797 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49
2024-11-07T14:18:08,797 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0740 sec
2024-11-07T14:18:08,798 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 1.0790 sec
2024-11-07T14:18:08,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49
2024-11-07T14:18:08,824 INFO [Thread-582 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed
2024-11-07T14:18:08,826 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-07T14:18:08,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees
2024-11-07T14:18:08,827 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-07T14:18:08,828 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-07T14:18:08,828 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-07T14:18:08,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51
2024-11-07T14:18:08,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the entry "DEBUG [RpcServer.default.FPBQ.Fifo.handler=N,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" repeats continuously from handlers 0, 1 and 2 between 2024-11-07T14:18:08,890 and 2024-11-07T14:18:08,957, with one distinct entry interleaved: ...]
2024-11-07T14:18:08,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51
2024-11-07T14:18:08,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T14:18:08,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:08,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,982 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:08,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,982 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-07T14:18:08,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:08,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:18:08,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:08,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-11-07T14:18:08,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-11-07T14:18:08,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,985 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,985 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-11-07T14:18:08,986 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 156 msec 2024-11-07T14:18:08,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,987 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 160 msec 2024-11-07T14:18:08,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:08,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,034 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a97b90143d56ce006ffcb227cc121b11 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-07T14:18:09,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:09,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=A 2024-11-07T14:18:09,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:09,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=B 2024-11-07T14:18:09,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:09,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=C 2024-11-07T14:18:09,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:09,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,068 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110741d06897ae604a5086d7112cdad68e39_a97b90143d56ce006ffcb227cc121b11 is 50, key is test_row_0/A:col10/1730989089031/Put/seqid=0 2024-11-07T14:18:09,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:18:09,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741998_1174 (size=27248) 2024-11-07T14:18:09,093 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/8c195013a54a4c35ba6b32d556f2f146 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/8c195013a54a4c35ba6b32d556f2f146 2024-11-07T14:18:09,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:09,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989149086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:09,094 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:09,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989149087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:09,095 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:09,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989149090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:09,095 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:09,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989149091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:09,096 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:09,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989149093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:09,100 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a97b90143d56ce006ffcb227cc121b11/A of a97b90143d56ce006ffcb227cc121b11 into 8c195013a54a4c35ba6b32d556f2f146(size=30.8 K), total size for store is 44.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:18:09,100 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:18:09,100 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11., storeName=a97b90143d56ce006ffcb227cc121b11/A, priority=13, startTime=1730989088167; duration=0sec 2024-11-07T14:18:09,100 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:09,100 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a97b90143d56ce006ffcb227cc121b11:A 2024-11-07T14:18:09,118 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/05606eb2b42c4fc194c07626ffe0a518 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/05606eb2b42c4fc194c07626ffe0a518 2024-11-07T14:18:09,125 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a97b90143d56ce006ffcb227cc121b11/C of a97b90143d56ce006ffcb227cc121b11 into 05606eb2b42c4fc194c07626ffe0a518(size=12.3 K), total size for store is 19.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:18:09,125 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:18:09,125 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11., storeName=a97b90143d56ce006ffcb227cc121b11/C, priority=13, startTime=1730989088167; duration=0sec 2024-11-07T14:18:09,125 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:09,125 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a97b90143d56ce006ffcb227cc121b11:C 2024-11-07T14:18:09,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-07T14:18:09,132 INFO [Thread-582 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-11-07T14:18:09,134 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:18:09,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-11-07T14:18:09,137 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:18:09,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-07T14:18:09,138 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:18:09,138 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:18:09,196 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:09,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989149195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:09,197 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:09,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989149195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:09,198 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:09,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989149196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:09,199 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:09,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989149196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:09,199 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:09,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989149197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:09,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-07T14:18:09,290 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:09,290 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-07T14:18:09,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:09,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:18:09,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:09,291 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:09,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:09,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:09,398 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:09,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989149398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:09,401 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:09,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989149399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:09,401 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:09,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989149399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:09,401 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:09,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989149400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:09,401 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:09,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989149401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:09,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-07T14:18:09,443 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:09,443 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-07T14:18:09,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:09,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:18:09,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:09,444 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:09,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:09,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:09,482 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:09,486 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110741d06897ae604a5086d7112cdad68e39_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110741d06897ae604a5086d7112cdad68e39_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:09,487 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/2c2f63bc2a1c4abd9513f216a7d94339, store: [table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:09,488 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/2c2f63bc2a1c4abd9513f216a7d94339 is 175, key is test_row_0/A:col10/1730989089031/Put/seqid=0 2024-11-07T14:18:09,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741997_1173 (size=83035) 2024-11-07T14:18:09,556 INFO [master/69430dbfd73f:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 
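[Editorial note, not part of the captured log] The repeated RegionTooBusyException entries above show the region server rejecting Mutate calls while the region's memstore is over its 512.0 K blocking limit; the exception propagates back to the writing client. The following is only a minimal client-side sketch of reacting to that failure mode: the table, row, and column names are simply the ones visible in this log, while the value, retry count, and backoff are hypothetical. In practice the standard HBase client usually retries this internally and may surface a wrapped retries-exhausted exception rather than RegionTooBusyException directly.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row and column taken from the log ("key is test_row_0/A:col10/...").
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);           // may fail while the region is over its memstore blocking limit
          break;
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs);  // give the in-progress flush time to complete
          backoffMs *= 2;           // simple exponential backoff before retrying
        }
      }
    }
  }
}

The blocking limit itself is normally the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier, so the 512.0 K figure would be consistent with a deliberately small flush size configured for this test, though the exact test settings are not shown in this excerpt.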
2024-11-07T14:18:09,556 INFO [master/69430dbfd73f:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-07T14:18:09,596 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:09,597 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-07T14:18:09,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:09,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:18:09,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:09,597 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:09,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:09,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:09,701 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:09,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989149701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:09,702 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:09,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989149702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:09,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:09,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989149703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:09,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:09,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989149703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:09,704 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:09,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989149704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:09,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-07T14:18:09,749 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:09,750 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-07T14:18:09,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:09,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:18:09,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:09,750 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:09,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:09,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:09,891 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=226, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/2c2f63bc2a1c4abd9513f216a7d94339 2024-11-07T14:18:09,899 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/e3996afe2f0f485591029d02a1c76663 is 50, key is test_row_0/B:col10/1730989089031/Put/seqid=0 2024-11-07T14:18:09,902 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:09,902 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-07T14:18:09,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:09,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:18:09,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
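[Editorial note, not part of the captured log] The "NOT flushing ... as already flushing" followed by "Unable to complete flush" sequence above shows the master repeatedly re-dispatching the remote flush procedure (pid=54) while the region server still has a flush in progress, so each attempt is reported back as failed and then retried. For orientation only, below is a minimal sketch of how such a flush is normally requested through the public Admin API; the table name is the one in the log, everything else is assumed, and this code is not part of the test itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; on the region server
      // this is carried out by a remote flush procedure like the pid=54 entries above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}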
2024-11-07T14:18:09,903 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:09,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:09,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:09,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741999_1175 (size=12151) 2024-11-07T14:18:09,914 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=226 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/e3996afe2f0f485591029d02a1c76663 2024-11-07T14:18:09,922 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/0f150e66ed8249b9a733aeb8b2f326ec is 50, key is test_row_0/C:col10/1730989089031/Put/seqid=0 2024-11-07T14:18:09,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742000_1176 (size=12151) 2024-11-07T14:18:10,054 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:10,055 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-07T14:18:10,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:10,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
as already flushing 2024-11-07T14:18:10,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:10,055 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:10,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:10,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:10,206 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:10,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989150205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:10,206 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:10,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989150206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:10,207 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:10,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989150206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:10,207 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:10,208 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-07T14:18:10,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:10,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:18:10,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:10,208 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:10,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:10,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:10,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:10,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989150209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:10,210 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:10,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989150209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:10,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-07T14:18:10,330 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=226 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/0f150e66ed8249b9a733aeb8b2f326ec 2024-11-07T14:18:10,336 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/2c2f63bc2a1c4abd9513f216a7d94339 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/2c2f63bc2a1c4abd9513f216a7d94339 2024-11-07T14:18:10,340 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/2c2f63bc2a1c4abd9513f216a7d94339, entries=450, sequenceid=226, filesize=81.1 K 2024-11-07T14:18:10,341 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/e3996afe2f0f485591029d02a1c76663 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/e3996afe2f0f485591029d02a1c76663 2024-11-07T14:18:10,346 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/e3996afe2f0f485591029d02a1c76663, entries=150, sequenceid=226, filesize=11.9 K 2024-11-07T14:18:10,348 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/0f150e66ed8249b9a733aeb8b2f326ec as 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/0f150e66ed8249b9a733aeb8b2f326ec 2024-11-07T14:18:10,353 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/0f150e66ed8249b9a733aeb8b2f326ec, entries=150, sequenceid=226, filesize=11.9 K 2024-11-07T14:18:10,354 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for a97b90143d56ce006ffcb227cc121b11 in 1320ms, sequenceid=226, compaction requested=true 2024-11-07T14:18:10,354 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:18:10,355 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:18:10,355 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a97b90143d56ce006ffcb227cc121b11:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:18:10,355 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:10,355 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a97b90143d56ce006ffcb227cc121b11:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:18:10,355 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:18:10,355 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a97b90143d56ce006ffcb227cc121b11:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:18:10,355 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-07T14:18:10,356 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 128399 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:18:10,356 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:18:10,356 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): a97b90143d56ce006ffcb227cc121b11/A is initiating minor compaction (all files) 2024-11-07T14:18:10,356 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a97b90143d56ce006ffcb227cc121b11/A in TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
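The selection message above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking") lines up with the stock thresholds: a store becomes eligible for minor compaction once hbase.hstore.compaction.min files have accumulated (3 by default) and flushes start being held back once it reaches hbase.hstore.blockingStoreFiles (16 by default). A minimal sketch of setting those knobs on a standard Configuration object follows; the property names are ordinary HBase settings, the values are purely illustrative and not taken from this test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible store files before a minor compaction is scheduled
        // (the selection above picks exactly 3 files).
        conf.setInt("hbase.hstore.compaction.min", 3);
        // Upper bound on the number of files taken into a single minor compaction.
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Once a store holds this many files, flushes are delayed and writes can be blocked
        // ("16 blocking" in the selection message).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
    }
}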
2024-11-07T14:18:10,356 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/8c195013a54a4c35ba6b32d556f2f146, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/545ab18391ca44b1ab4ca07e653fbfe9, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/2c2f63bc2a1c4abd9513f216a7d94339] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp, totalSize=125.4 K 2024-11-07T14:18:10,356 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:10,356 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. files: [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/8c195013a54a4c35ba6b32d556f2f146, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/545ab18391ca44b1ab4ca07e653fbfe9, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/2c2f63bc2a1c4abd9513f216a7d94339] 2024-11-07T14:18:10,357 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8c195013a54a4c35ba6b32d556f2f146, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1730989086813 2024-11-07T14:18:10,357 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 32111 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:18:10,357 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): a97b90143d56ce006ffcb227cc121b11/B is initiating minor compaction (all files) 2024-11-07T14:18:10,358 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a97b90143d56ce006ffcb227cc121b11/B in TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
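The mob.DefaultMobStoreCompactor entries show that family A is MOB-enabled in this run, so the compactor also has to decide whether any MOB files need rewriting. For reference, a MOB family is declared through the column family descriptor roughly as below; the table name and threshold are illustrative and not taken from the test.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTable {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Family "A" keeps values above the MOB threshold in separate MOB files,
            // which is what DefaultMobStoreFlusher/DefaultMobStoreCompactor manage in this log.
            TableDescriptorBuilder table = TableDescriptorBuilder.newBuilder(TableName.valueOf("MobDemo"));
            table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                .setMobEnabled(true)
                .setMobThreshold(100L * 1024)   // cells larger than ~100 KB go to MOB files (illustrative value)
                .build());
            admin.createTable(table.build());
        }
    }
}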
2024-11-07T14:18:10,358 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/ea06fa46489e49e8a070bd7c95d5bf13, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/bb9040b364774daf8f4b818c9577e678, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/e3996afe2f0f485591029d02a1c76663] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp, totalSize=31.4 K 2024-11-07T14:18:10,358 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 545ab18391ca44b1ab4ca07e653fbfe9, keycount=50, bloomtype=ROW, size=13.5 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1730989086851 2024-11-07T14:18:10,358 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting ea06fa46489e49e8a070bd7c95d5bf13, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1730989086813 2024-11-07T14:18:10,358 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2c2f63bc2a1c4abd9513f216a7d94339, keycount=450, bloomtype=ROW, size=81.1 K, encoding=NONE, compression=NONE, seqNum=226, earliestPutTs=1730989089023 2024-11-07T14:18:10,359 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting bb9040b364774daf8f4b818c9577e678, keycount=50, bloomtype=ROW, size=7.2 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1730989086851 2024-11-07T14:18:10,360 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting e3996afe2f0f485591029d02a1c76663, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=226, earliestPutTs=1730989089031 2024-11-07T14:18:10,361 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:10,361 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-07T14:18:10,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
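pid=54 keeps being re-dispatched because the region was still busy with the memstore flush started earlier; each previous attempt failed with "Unable to complete flush ... as already flushing" and the master retried. The attempt starting here finally proceeds because that flush completed above. Client-side, this cycle is typically kicked off by a single administrative flush request, roughly like the sketch below (assuming, as the pid=53/54 entries suggest, that the flush in this build is driven through a master procedure).

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Asks HBase to flush every region of the table; in this log the master then
            // dispatches FlushRegionCallable to the region server and retries regions
            // that report "already flushing" until the in-flight flush finishes.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}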
2024-11-07T14:18:10,362 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing a97b90143d56ce006ffcb227cc121b11 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-07T14:18:10,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=A 2024-11-07T14:18:10,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:10,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=B 2024-11-07T14:18:10,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:10,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=C 2024-11-07T14:18:10,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:10,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110713dce815a3b840218c5e9898c81269a5_a97b90143d56ce006ffcb227cc121b11 is 50, key is test_row_0/A:col10/1730989089091/Put/seqid=0 2024-11-07T14:18:10,384 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:10,387 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a97b90143d56ce006ffcb227cc121b11#B#compaction#145 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:18:10,387 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/cf867d8c85564f72ace51ad14edbd36d is 50, key is test_row_0/B:col10/1730989089031/Put/seqid=0 2024-11-07T14:18:10,402 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107b46f0910ae7c44f0a167615bf615211c_a97b90143d56ce006ffcb227cc121b11 store=[table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:10,405 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107b46f0910ae7c44f0a167615bf615211c_a97b90143d56ce006ffcb227cc121b11, store=[table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:10,405 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107b46f0910ae7c44f0a167615bf615211c_a97b90143d56ce006ffcb227cc121b11 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:10,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742001_1177 (size=12304) 2024-11-07T14:18:10,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742002_1178 (size=12697) 2024-11-07T14:18:10,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742003_1179 (size=4469) 2024-11-07T14:18:10,432 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a97b90143d56ce006ffcb227cc121b11#A#compaction#144 average throughput is 0.51 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:18:10,433 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/15347fa45d594b86b03d12c517afc79c is 175, key is test_row_0/A:col10/1730989089031/Put/seqid=0 2024-11-07T14:18:10,438 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/cf867d8c85564f72ace51ad14edbd36d as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/cf867d8c85564f72ace51ad14edbd36d 2024-11-07T14:18:10,443 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a97b90143d56ce006ffcb227cc121b11/B of a97b90143d56ce006ffcb227cc121b11 into cf867d8c85564f72ace51ad14edbd36d(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:18:10,443 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:18:10,443 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11., storeName=a97b90143d56ce006ffcb227cc121b11/B, priority=13, startTime=1730989090355; duration=0sec 2024-11-07T14:18:10,444 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:18:10,444 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a97b90143d56ce006ffcb227cc121b11:B 2024-11-07T14:18:10,444 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:18:10,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742004_1180 (size=31651) 2024-11-07T14:18:10,446 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 32111 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:18:10,446 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): a97b90143d56ce006ffcb227cc121b11/C is initiating minor compaction (all files) 2024-11-07T14:18:10,446 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a97b90143d56ce006ffcb227cc121b11/C in TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
2024-11-07T14:18:10,446 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/05606eb2b42c4fc194c07626ffe0a518, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/55c971ce7d6d4b5e81c521dbf32826d9, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/0f150e66ed8249b9a733aeb8b2f326ec] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp, totalSize=31.4 K 2024-11-07T14:18:10,447 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 05606eb2b42c4fc194c07626ffe0a518, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1730989086813 2024-11-07T14:18:10,448 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 55c971ce7d6d4b5e81c521dbf32826d9, keycount=50, bloomtype=ROW, size=7.2 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1730989086851 2024-11-07T14:18:10,448 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 0f150e66ed8249b9a733aeb8b2f326ec, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=226, earliestPutTs=1730989089031 2024-11-07T14:18:10,452 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/15347fa45d594b86b03d12c517afc79c as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/15347fa45d594b86b03d12c517afc79c 2024-11-07T14:18:10,459 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a97b90143d56ce006ffcb227cc121b11#C#compaction#146 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:18:10,459 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a97b90143d56ce006ffcb227cc121b11/A of a97b90143d56ce006ffcb227cc121b11 into 15347fa45d594b86b03d12c517afc79c(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
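Each store's three files have now been rewritten into a single file (A at 30.9 K above, B at 12.4 K earlier, C just below). To observe or force the same thing from a client instead of waiting for a flush-triggered selection, the Admin API can be used directly; a small sketch, with only the table name taken from this log and the polling interval illustrative:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactAndWait {
    public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Request a major compaction of every region of the table.
            admin.majorCompact(table);
            // Poll until the table reports no compaction in progress.
            while (admin.getCompactionState(table) != CompactionState.NONE) {
                Thread.sleep(1000);
            }
        }
    }
}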
2024-11-07T14:18:10,459 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:18:10,459 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11., storeName=a97b90143d56ce006ffcb227cc121b11/A, priority=13, startTime=1730989090354; duration=0sec 2024-11-07T14:18:10,460 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/0adf09160d894b20a1ed663291a164b4 is 50, key is test_row_0/C:col10/1730989089031/Put/seqid=0 2024-11-07T14:18:10,460 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:10,460 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a97b90143d56ce006ffcb227cc121b11:A 2024-11-07T14:18:10,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742005_1181 (size=12697) 2024-11-07T14:18:10,471 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/0adf09160d894b20a1ed663291a164b4 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/0adf09160d894b20a1ed663291a164b4 2024-11-07T14:18:10,476 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a97b90143d56ce006ffcb227cc121b11/C of a97b90143d56ce006ffcb227cc121b11 into 0adf09160d894b20a1ed663291a164b4(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
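The repeated "Over memstore limit=512.0 K" warnings around this compaction come from HRegion.checkResources: writes are rejected while the region's memstore sits above its blocking size, which is normally hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier (the tiny 512 K figure suggests a deliberately small test flush size, which is an assumption, not something the log states). The HBase client retries RegionTooBusyException on its own, but an explicit bounded retry would look roughly like the sketch below; the row, family, value and backoff are illustrative only.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 0; ; attempt++) {
                try {
                    table.put(put);
                    break;                                   // write accepted
                } catch (IOException e) {
                    // RegionTooBusyException may surface directly or wrapped, depending on
                    // the client's own retry settings; anything else is treated as fatal.
                    boolean busy = e instanceof RegionTooBusyException
                        || e.getCause() instanceof RegionTooBusyException;
                    if (!busy || attempt >= 5) {
                        throw e;                             // give up after a few tries
                    }
                    Thread.sleep(backoffMs);                 // wait for the flush to drain the memstore
                    backoffMs *= 2;                          // simple exponential backoff
                }
            }
        }
    }
}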
2024-11-07T14:18:10,476 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:18:10,476 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11., storeName=a97b90143d56ce006ffcb227cc121b11/C, priority=13, startTime=1730989090355; duration=0sec 2024-11-07T14:18:10,476 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:10,476 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a97b90143d56ce006ffcb227cc121b11:C 2024-11-07T14:18:10,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:10,822 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110713dce815a3b840218c5e9898c81269a5_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110713dce815a3b840218c5e9898c81269a5_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:10,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/597f60ba9472488995287c8b129415e6, store: [table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:10,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/597f60ba9472488995287c8b129415e6 is 175, key is test_row_0/A:col10/1730989089091/Put/seqid=0 2024-11-07T14:18:10,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742006_1182 (size=31105) 2024-11-07T14:18:11,211 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:18:11,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:11,221 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:11,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989151218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:11,221 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:11,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989151218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:11,221 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:11,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989151219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:11,222 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:11,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989151219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:11,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:11,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989151221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:11,229 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=252, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/597f60ba9472488995287c8b129415e6 2024-11-07T14:18:11,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/f41a275edd924a948b4c87502d46e113 is 50, key is test_row_0/B:col10/1730989089091/Put/seqid=0 2024-11-07T14:18:11,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-07T14:18:11,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742007_1183 (size=12151) 2024-11-07T14:18:11,246 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/f41a275edd924a948b4c87502d46e113 2024-11-07T14:18:11,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/7e9c1f0a181c4d3e98b266df0218b6d0 is 50, key is test_row_0/C:col10/1730989089091/Put/seqid=0 2024-11-07T14:18:11,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742008_1184 (size=12151) 2024-11-07T14:18:11,259 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=252 (bloomFilter=true), 
to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/7e9c1f0a181c4d3e98b266df0218b6d0 2024-11-07T14:18:11,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/597f60ba9472488995287c8b129415e6 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/597f60ba9472488995287c8b129415e6 2024-11-07T14:18:11,268 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/597f60ba9472488995287c8b129415e6, entries=150, sequenceid=252, filesize=30.4 K 2024-11-07T14:18:11,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/f41a275edd924a948b4c87502d46e113 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/f41a275edd924a948b4c87502d46e113 2024-11-07T14:18:11,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,274 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/f41a275edd924a948b4c87502d46e113, entries=150, sequenceid=252, filesize=11.9 K 2024-11-07T14:18:11,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/7e9c1f0a181c4d3e98b266df0218b6d0 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/7e9c1f0a181c4d3e98b266df0218b6d0 2024-11-07T14:18:11,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,281 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/7e9c1f0a181c4d3e98b266df0218b6d0, entries=150, sequenceid=252, filesize=11.9 K 2024-11-07T14:18:11,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,282 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for a97b90143d56ce006ffcb227cc121b11 in 921ms, sequenceid=252, compaction requested=false 2024-11-07T14:18:11,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:18:11,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
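The RegionTooBusyException entries above show HRegion.checkResources rejecting Mutate calls once the region's memstore passed the 512.0 K blocking limit, and the flush that follows (stores A, B and C committed at sequenceid=252) clearing that pressure. Below is a minimal, purely illustrative Java sketch of how a writer might cope with this situation, assuming a plain HBase client; the table, row and column names are taken from the log, but the explicit retry/backoff loop is an assumption and not part of TestAcidGuarantees. In practice the HBase client may absorb RegionTooBusyException inside its own retry policy (governed by hbase.client.retries.number) before it ever reaches the caller.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryingPutExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;                 // illustrative starting backoff (assumption)
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);                   // server may reject with RegionTooBusyException
              break;                            // write accepted
            } catch (RegionTooBusyException e) {
              // Region is over its blocking memstore limit; give the flush time to finish.
              Thread.sleep(backoffMs);
              backoffMs *= 2;                   // simple exponential backoff (assumption)
            }
          }
        }
      }
    }

The 512.0 K figure in these messages appears to be the region's blocking memstore size, which in stock HBase is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the test presumably configures a very small flush size so that flushes and write blocking are exercised frequently.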
2024-11-07T14:18:11,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-11-07T14:18:11,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-11-07T14:18:11,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,286 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-11-07T14:18:11,286 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1460 sec 2024-11-07T14:18:11,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,287 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 2.1520 sec 2024-11-07T14:18:11,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... duplicate DEBUG entries omitted: RpcServer.default.FPBQ.Fifo handlers 0-2 (port=45917) repeatedly logging "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" between 2024-11-07T14:18:11,320 and 2024-11-07T14:18:11,343 ...]
2024-11-07T14:18:11,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:11,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,344 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a97b90143d56ce006ffcb227cc121b11 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-07T14:18:11,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,346 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=A 2024-11-07T14:18:11,346 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:11,346 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=B 2024-11-07T14:18:11,346 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:11,346 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=C 2024-11-07T14:18:11,346 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:11,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... duplicate DEBUG entries omitted: RpcServer.default.FPBQ.Fifo handlers (port=45917) repeatedly logging the same StoreFileTrackerFactory(122) message between 2024-11-07T14:18:11,347 and 2024-11-07T14:18:11,356 ...]
2024-11-07T14:18:11,356 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell
in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411075529d23274404cdf94d3807315cba977_a97b90143d56ce006ffcb227cc121b11 is 50, key is test_row_0/A:col10/1730989091344/Put/seqid=0 2024-11-07T14:18:11,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742009_1185 (size=14994) 2024-11-07T14:18:11,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,393 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,399 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411075529d23274404cdf94d3807315cba977_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411075529d23274404cdf94d3807315cba977_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:11,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,400 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/fb4e33decdec421db6942b2725b8ff48, store: [table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:11,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,401 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/fb4e33decdec421db6942b2725b8ff48 is 175, key is test_row_0/A:col10/1730989091344/Put/seqid=0 2024-11-07T14:18:11,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,404 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742010_1186 (size=39945) 2024-11-07T14:18:11,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:11,414 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:11,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989151410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:11,414 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:11,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989151411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:11,415 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:11,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989151414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:11,517 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:11,517 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:11,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989151515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:11,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989151515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:11,517 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:11,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989151516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:11,719 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:11,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989151718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:11,720 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:11,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989151719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:11,720 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:11,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989151719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:11,811 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=269, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/fb4e33decdec421db6942b2725b8ff48 2024-11-07T14:18:11,822 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/c371e8d977dd42d6b557b8955a4e84c1 is 50, key is test_row_0/B:col10/1730989091344/Put/seqid=0 2024-11-07T14:18:11,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742011_1187 (size=9857) 2024-11-07T14:18:12,022 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:12,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989152022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:12,024 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:12,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989152023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:12,026 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:12,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989152024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:12,231 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=269 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/c371e8d977dd42d6b557b8955a4e84c1 2024-11-07T14:18:12,240 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/8d4fc18342e64bb8b4e7a93e56544381 is 50, key is test_row_0/C:col10/1730989091344/Put/seqid=0 2024-11-07T14:18:12,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742012_1188 (size=9857) 2024-11-07T14:18:12,526 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:12,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989152524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:12,531 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:12,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989152530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:12,532 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:12,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989152531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:12,649 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=269 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/8d4fc18342e64bb8b4e7a93e56544381 2024-11-07T14:18:12,655 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/fb4e33decdec421db6942b2725b8ff48 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/fb4e33decdec421db6942b2725b8ff48 2024-11-07T14:18:12,659 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/fb4e33decdec421db6942b2725b8ff48, entries=200, sequenceid=269, filesize=39.0 K 2024-11-07T14:18:12,660 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/c371e8d977dd42d6b557b8955a4e84c1 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/c371e8d977dd42d6b557b8955a4e84c1 2024-11-07T14:18:12,664 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/c371e8d977dd42d6b557b8955a4e84c1, entries=100, sequenceid=269, filesize=9.6 K 2024-11-07T14:18:12,665 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/8d4fc18342e64bb8b4e7a93e56544381 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/8d4fc18342e64bb8b4e7a93e56544381 2024-11-07T14:18:12,671 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/8d4fc18342e64bb8b4e7a93e56544381, entries=100, sequenceid=269, filesize=9.6 K 2024-11-07T14:18:12,672 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for a97b90143d56ce006ffcb227cc121b11 in 1328ms, sequenceid=269, compaction requested=true 2024-11-07T14:18:12,672 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:18:12,672 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a97b90143d56ce006ffcb227cc121b11:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:18:12,672 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:12,672 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a97b90143d56ce006ffcb227cc121b11:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:18:12,672 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:12,672 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:18:12,672 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a97b90143d56ce006ffcb227cc121b11:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:18:12,672 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:18:12,672 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:18:12,674 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102701 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:18:12,674 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34705 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:18:12,674 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): a97b90143d56ce006ffcb227cc121b11/A is initiating minor compaction (all files) 2024-11-07T14:18:12,674 DEBUG 
[RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): a97b90143d56ce006ffcb227cc121b11/B is initiating minor compaction (all files) 2024-11-07T14:18:12,674 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a97b90143d56ce006ffcb227cc121b11/B in TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:12,674 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a97b90143d56ce006ffcb227cc121b11/A in TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:12,674 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/cf867d8c85564f72ace51ad14edbd36d, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/f41a275edd924a948b4c87502d46e113, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/c371e8d977dd42d6b557b8955a4e84c1] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp, totalSize=33.9 K 2024-11-07T14:18:12,674 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/15347fa45d594b86b03d12c517afc79c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/597f60ba9472488995287c8b129415e6, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/fb4e33decdec421db6942b2725b8ff48] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp, totalSize=100.3 K 2024-11-07T14:18:12,674 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:12,675 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
files: [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/15347fa45d594b86b03d12c517afc79c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/597f60ba9472488995287c8b129415e6, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/fb4e33decdec421db6942b2725b8ff48] 2024-11-07T14:18:12,675 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 15347fa45d594b86b03d12c517afc79c, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=226, earliestPutTs=1730989089031 2024-11-07T14:18:12,675 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting cf867d8c85564f72ace51ad14edbd36d, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=226, earliestPutTs=1730989089031 2024-11-07T14:18:12,676 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting f41a275edd924a948b4c87502d46e113, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1730989089086 2024-11-07T14:18:12,676 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 597f60ba9472488995287c8b129415e6, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1730989089086 2024-11-07T14:18:12,676 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting c371e8d977dd42d6b557b8955a4e84c1, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1730989091219 2024-11-07T14:18:12,676 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting fb4e33decdec421db6942b2725b8ff48, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1730989091217 2024-11-07T14:18:12,684 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:12,685 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a97b90143d56ce006ffcb227cc121b11#B#compaction#152 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:18:12,685 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/6ed5bceddb8f4745a8d508176bff3912 is 50, key is test_row_0/B:col10/1730989091344/Put/seqid=0 2024-11-07T14:18:12,689 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107c3df56e0e5fa419abd09c0b87b779776_a97b90143d56ce006ffcb227cc121b11 store=[table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:12,691 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107c3df56e0e5fa419abd09c0b87b779776_a97b90143d56ce006ffcb227cc121b11, store=[table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:12,691 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107c3df56e0e5fa419abd09c0b87b779776_a97b90143d56ce006ffcb227cc121b11 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:12,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742013_1189 (size=12899) 2024-11-07T14:18:12,704 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/6ed5bceddb8f4745a8d508176bff3912 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/6ed5bceddb8f4745a8d508176bff3912 2024-11-07T14:18:12,710 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a97b90143d56ce006ffcb227cc121b11/B of a97b90143d56ce006ffcb227cc121b11 into 6ed5bceddb8f4745a8d508176bff3912(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
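Editor's note: the compaction entries above show the exploring-style selection picking 3 eligible store files ("selected 3 files of size 34705 ... after considering 1 permutations with 1 in ratio"). The following is a hedged, simplified Java sketch of that ratio-based window selection; it is an educational illustration, not HBase's actual ExploringCompactionPolicy, and the sizes and ratio used are only examples loosely based on the B-store files in the log.

```java
import java.util.ArrayList;
import java.util.List;

/**
 * Simplified illustration of ratio-based ("exploring") compaction selection:
 * scan contiguous windows of candidate store files and keep the cheapest
 * window in which every file is no larger than ratio * (sum of the others).
 * Educational sketch only; not the HBase implementation.
 */
public class CompactionSelectionSketch {

  static List<Long> selectFiles(List<Long> fileSizes, int minFiles, int maxFiles, double ratio) {
    List<Long> best = new ArrayList<>();
    long bestTotal = Long.MAX_VALUE;
    for (int start = 0; start < fileSizes.size(); start++) {
      for (int end = start + minFiles; end <= Math.min(fileSizes.size(), start + maxFiles); end++) {
        List<Long> window = fileSizes.subList(start, end);
        if (satisfiesRatio(window, ratio)) {
          long total = window.stream().mapToLong(Long::longValue).sum();
          // Prefer the qualifying selection that rewrites the least data.
          if (total < bestTotal) {
            bestTotal = total;
            best = new ArrayList<>(window);
          }
        }
      }
    }
    return best;
  }

  /** Every file must be no larger than ratio * (sum of the remaining files in the window). */
  static boolean satisfiesRatio(List<Long> window, double ratio) {
    long total = window.stream().mapToLong(Long::longValue).sum();
    for (long size : window) {
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Illustrative sizes roughly matching the B-store files above (~12.4 K, ~11.9 K, ~9.6 K).
    List<Long> sizes = List.of(12_700L, 12_149L, 9_857L);
    System.out.println("Selected for compaction: " + selectFiles(sizes, 3, 10, 1.2));
  }
}
```

With these inputs all three files satisfy the ratio test, matching the log's "3 files ... 1 permutations with 1 in ratio" outcome.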
2024-11-07T14:18:12,710 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:18:12,710 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11., storeName=a97b90143d56ce006ffcb227cc121b11/B, priority=13, startTime=1730989092672; duration=0sec 2024-11-07T14:18:12,710 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:18:12,710 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a97b90143d56ce006ffcb227cc121b11:B 2024-11-07T14:18:12,710 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:18:12,712 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34705 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:18:12,712 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): a97b90143d56ce006ffcb227cc121b11/C is initiating minor compaction (all files) 2024-11-07T14:18:12,712 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a97b90143d56ce006ffcb227cc121b11/C in TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:12,712 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/0adf09160d894b20a1ed663291a164b4, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/7e9c1f0a181c4d3e98b266df0218b6d0, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/8d4fc18342e64bb8b4e7a93e56544381] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp, totalSize=33.9 K 2024-11-07T14:18:12,713 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 0adf09160d894b20a1ed663291a164b4, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=226, earliestPutTs=1730989089031 2024-11-07T14:18:12,713 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 7e9c1f0a181c4d3e98b266df0218b6d0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1730989089086 2024-11-07T14:18:12,714 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 8d4fc18342e64bb8b4e7a93e56544381, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1730989091219 2024-11-07T14:18:12,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is 
added to blk_1073742014_1190 (size=4469) 2024-11-07T14:18:12,723 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a97b90143d56ce006ffcb227cc121b11#A#compaction#153 average throughput is 0.63 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:18:12,724 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/ce0c07d0aa1c465982503cc72fb314ac is 175, key is test_row_0/A:col10/1730989091344/Put/seqid=0 2024-11-07T14:18:12,725 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a97b90143d56ce006ffcb227cc121b11#C#compaction#154 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:18:12,726 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/a9479a07811547baad32926505f565a0 is 50, key is test_row_0/C:col10/1730989091344/Put/seqid=0 2024-11-07T14:18:12,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742015_1191 (size=31960) 2024-11-07T14:18:12,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742016_1192 (size=12899) 2024-11-07T14:18:13,144 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/ce0c07d0aa1c465982503cc72fb314ac as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/ce0c07d0aa1c465982503cc72fb314ac 2024-11-07T14:18:13,151 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/a9479a07811547baad32926505f565a0 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/a9479a07811547baad32926505f565a0 2024-11-07T14:18:13,155 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a97b90143d56ce006ffcb227cc121b11/A of a97b90143d56ce006ffcb227cc121b11 into ce0c07d0aa1c465982503cc72fb314ac(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
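Editor's note: the repeated RegionTooBusyException ("Over memstore limit=512.0 K") means writes are being rejected once the region's memstore passes its blocking limit (flush size times the block multiplier); the test deliberately uses a small flush size to force this. Below is a hedged sketch of the standard configuration knobs involved. The property names are real HBase keys; the values shown are illustrative defaults, not the values this test uses.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

/**
 * Illustrative tuning of the memstore/compaction thresholds behind the
 * behaviour in this log: flush at the memstore limit, reject writes with
 * RegionTooBusyException once flush.size * block.multiplier is exceeded.
 * Example values only.
 */
public class MemstoreTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Flush a region's memstore once it reaches this many bytes (default 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);

    // Block new updates when the memstore reaches flush.size * this multiplier;
    // that is the point at which RegionTooBusyException is thrown.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    // Consider a minor compaction once a store has this many HFiles.
    conf.setInt("hbase.hstore.compactionThreshold", 3);

    // Delay flushes when a store accumulates this many files
    // (matches the "16 blocking" figure in the compaction-selection entries).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);

    System.out.println("memstore flush size = "
        + conf.getLong("hbase.hregion.memstore.flush.size", -1));
  }
}
```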
2024-11-07T14:18:13,155 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:18:13,155 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11., storeName=a97b90143d56ce006ffcb227cc121b11/A, priority=13, startTime=1730989092672; duration=0sec 2024-11-07T14:18:13,155 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:13,156 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a97b90143d56ce006ffcb227cc121b11:A 2024-11-07T14:18:13,158 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a97b90143d56ce006ffcb227cc121b11/C of a97b90143d56ce006ffcb227cc121b11 into a9479a07811547baad32926505f565a0(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:18:13,158 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:18:13,158 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11., storeName=a97b90143d56ce006ffcb227cc121b11/C, priority=13, startTime=1730989092672; duration=0sec 2024-11-07T14:18:13,158 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:13,159 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a97b90143d56ce006ffcb227cc121b11:C 2024-11-07T14:18:13,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:13,234 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a97b90143d56ce006ffcb227cc121b11 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-07T14:18:13,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=A 2024-11-07T14:18:13,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:13,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=B 2024-11-07T14:18:13,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:13,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=C 2024-11-07T14:18:13,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:13,242 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411070eb6c7990f324feaa73ed9f6de2e3481_a97b90143d56ce006ffcb227cc121b11 is 50, key is test_row_0/A:col10/1730989091393/Put/seqid=0 2024-11-07T14:18:13,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-07T14:18:13,243 INFO [Thread-582 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-11-07T14:18:13,245 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:18:13,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-11-07T14:18:13,246 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:18:13,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-07T14:18:13,246 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:18:13,247 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:18:13,249 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:13,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989153249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:13,249 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:13,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989153249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:13,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742017_1193 (size=14994) 2024-11-07T14:18:13,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-07T14:18:13,351 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:13,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989153351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:13,351 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:13,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989153351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:13,398 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:13,399 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-07T14:18:13,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:13,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:18:13,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:13,399 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
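Editor's note: the FlushRegionCallable above fails with "Unable to complete flush ... as already flushing", and the master's FlushTableProcedure (pid=56) has to retry. As a hedged illustration, this is how a test or client might request a table flush through the Admin API and tolerate a flush that is already in progress; Admin.flush(TableName) is the real HBase API, but the retry wrapper here is only a sketch.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

import java.io.IOException;

/**
 * Illustrative client-side flush request. The flush may fail while another
 * flush is running (as in the log), so the caller backs off and tries again.
 */
public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      int attempts = 0;
      while (true) {
        try {
          admin.flush(table);   // ask the master to flush every region of the table
          break;
        } catch (IOException e) {
          // A flush may already be in progress; back off briefly and retry a few times.
          if (++attempts >= 5) {
            throw e;
          }
          Thread.sleep(200L * attempts);
        }
      }
    }
  }
}
```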
2024-11-07T14:18:13,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:13,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:13,536 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:13,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989153535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:13,539 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:13,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989153539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:13,540 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:13,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989153540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:13,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-07T14:18:13,551 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:13,552 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-07T14:18:13,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:13,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:18:13,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:13,552 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
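Editor's note: throughout this section the RPC handlers reject Mutate calls with RegionTooBusyException while the memstore is over its limit. The sketch below shows one hedged way a writer could back off and retry when a put fails for that reason. It assumes, for illustration, that the exception surfaces to the caller; in practice the HBase client also retries internally before reporting a failure.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * Illustrative writer that retries with exponential backoff when the server
 * rejects a put because the region's memstore is over its limit. Assumes the
 * RegionTooBusyException reaches the caller; other IOExceptions propagate.
 */
public class BackoffWriterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;
      for (int attempt = 1; attempt <= 6; attempt++) {
        try {
          table.put(put);
          break;                    // write accepted
        } catch (RegionTooBusyException e) {
          if (attempt == 6) {
            throw e;                // give up after a bounded number of tries
          }
          Thread.sleep(backoffMs);  // let the flush/compaction catch up
          backoffMs *= 2;
        }
      }
    }
  }
}
```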
2024-11-07T14:18:13,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:13,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:13,555 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:13,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989153553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:13,555 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:13,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989153553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:13,667 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:13,673 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411070eb6c7990f324feaa73ed9f6de2e3481_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411070eb6c7990f324feaa73ed9f6de2e3481_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:13,675 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/aceef2d3b0f3444cbeba0a5a8fc7aed0, store: [table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:13,676 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/aceef2d3b0f3444cbeba0a5a8fc7aed0 is 175, key is test_row_0/A:col10/1730989091393/Put/seqid=0 2024-11-07T14:18:13,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742018_1194 (size=39949) 2024-11-07T14:18:13,683 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=295, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/aceef2d3b0f3444cbeba0a5a8fc7aed0 2024-11-07T14:18:13,692 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/176b17190846455fa419cda63f708b29 is 50, key is test_row_0/B:col10/1730989091393/Put/seqid=0 2024-11-07T14:18:13,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742019_1195 
(size=12301) 2024-11-07T14:18:13,704 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:13,705 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-07T14:18:13,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:13,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:18:13,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:13,705 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:13,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:13,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:13,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-07T14:18:13,857 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:13,858 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-07T14:18:13,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
2024-11-07T14:18:13,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:18:13,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:13,858 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:13,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:13,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:13,859 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:13,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989153856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:13,870 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:13,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989153868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:14,010 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:14,011 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-07T14:18:14,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
2024-11-07T14:18:14,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:18:14,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:14,011 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:14,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:14,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:14,099 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/176b17190846455fa419cda63f708b29 2024-11-07T14:18:14,107 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/ab4d4915fb72493a8add4230c79c71a8 is 50, key is test_row_0/C:col10/1730989091393/Put/seqid=0 2024-11-07T14:18:14,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742020_1196 (size=12301) 2024-11-07T14:18:14,167 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:14,167 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-07T14:18:14,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:14,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:18:14,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:14,168 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:14,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:14,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:14,320 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:14,321 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-07T14:18:14,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:14,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:18:14,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:14,321 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:14,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:14,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:14,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-07T14:18:14,362 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:14,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989154360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:14,376 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:14,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989154375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:14,474 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:14,474 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-07T14:18:14,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:14,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:18:14,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:14,474 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:14,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:14,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:14,514 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/ab4d4915fb72493a8add4230c79c71a8 2024-11-07T14:18:14,519 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/aceef2d3b0f3444cbeba0a5a8fc7aed0 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/aceef2d3b0f3444cbeba0a5a8fc7aed0 2024-11-07T14:18:14,524 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/aceef2d3b0f3444cbeba0a5a8fc7aed0, entries=200, sequenceid=295, filesize=39.0 K 2024-11-07T14:18:14,525 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/176b17190846455fa419cda63f708b29 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/176b17190846455fa419cda63f708b29 2024-11-07T14:18:14,530 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/176b17190846455fa419cda63f708b29, entries=150, sequenceid=295, filesize=12.0 K 2024-11-07T14:18:14,531 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/ab4d4915fb72493a8add4230c79c71a8 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/ab4d4915fb72493a8add4230c79c71a8 2024-11-07T14:18:14,535 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/ab4d4915fb72493a8add4230c79c71a8, entries=150, sequenceid=295, filesize=12.0 K 2024-11-07T14:18:14,536 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for a97b90143d56ce006ffcb227cc121b11 in 1302ms, sequenceid=295, compaction requested=false 2024-11-07T14:18:14,536 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:18:14,626 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
69430dbfd73f,45917,1730989044081 2024-11-07T14:18:14,627 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-07T14:18:14,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:14,627 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing a97b90143d56ce006ffcb227cc121b11 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-07T14:18:14,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=A 2024-11-07T14:18:14,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:14,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=B 2024-11-07T14:18:14,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:14,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=C 2024-11-07T14:18:14,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:14,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107aadbb56d6e7e496da127838f6f7e864a_a97b90143d56ce006ffcb227cc121b11 is 50, key is test_row_0/A:col10/1730989093245/Put/seqid=0 2024-11-07T14:18:14,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742021_1197 (size=12454) 2024-11-07T14:18:14,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:14,646 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107aadbb56d6e7e496da127838f6f7e864a_a97b90143d56ce006ffcb227cc121b11 to 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107aadbb56d6e7e496da127838f6f7e864a_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:14,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/e5d241ed8b70472bb3ebb81eb8334f85, store: [table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:14,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/e5d241ed8b70472bb3ebb81eb8334f85 is 175, key is test_row_0/A:col10/1730989093245/Put/seqid=0 2024-11-07T14:18:14,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742022_1198 (size=31255) 2024-11-07T14:18:15,052 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=309, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/e5d241ed8b70472bb3ebb81eb8334f85 2024-11-07T14:18:15,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/8af38f44d38742eeb76d160052c9133b is 50, key is test_row_0/B:col10/1730989093245/Put/seqid=0 2024-11-07T14:18:15,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742023_1199 (size=12301) 2024-11-07T14:18:15,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-07T14:18:15,366 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:18:15,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:15,403 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:15,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989155400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:15,403 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:15,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989155402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:15,480 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=309 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/8af38f44d38742eeb76d160052c9133b 2024-11-07T14:18:15,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/d4321d6f01ff4f9fb26fefacfbfbfd3d is 50, key is test_row_0/C:col10/1730989093245/Put/seqid=0 2024-11-07T14:18:15,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742024_1200 (size=12301) 2024-11-07T14:18:15,505 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:15,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989155504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:15,506 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:15,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989155504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:15,550 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:15,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1730989155549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:15,551 DEBUG [Thread-574 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4136 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11., hostname=69430dbfd73f,45917,1730989044081, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T14:18:15,553 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:15,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1730989155550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:15,554 DEBUG [Thread-578 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4144 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11., hostname=69430dbfd73f,45917,1730989044081, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T14:18:15,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:15,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1730989155555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:15,556 DEBUG [Thread-572 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4145 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11., hostname=69430dbfd73f,45917,1730989044081, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T14:18:15,708 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:15,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989155707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:15,708 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:15,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989155707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:15,894 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=309 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/d4321d6f01ff4f9fb26fefacfbfbfd3d 2024-11-07T14:18:15,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/e5d241ed8b70472bb3ebb81eb8334f85 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/e5d241ed8b70472bb3ebb81eb8334f85 2024-11-07T14:18:15,904 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/e5d241ed8b70472bb3ebb81eb8334f85, entries=150, sequenceid=309, filesize=30.5 K 2024-11-07T14:18:15,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/8af38f44d38742eeb76d160052c9133b as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/8af38f44d38742eeb76d160052c9133b 2024-11-07T14:18:15,909 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/8af38f44d38742eeb76d160052c9133b, entries=150, sequenceid=309, filesize=12.0 K 2024-11-07T14:18:15,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/d4321d6f01ff4f9fb26fefacfbfbfd3d as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/d4321d6f01ff4f9fb26fefacfbfbfd3d 2024-11-07T14:18:15,917 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/d4321d6f01ff4f9fb26fefacfbfbfd3d, entries=150, sequenceid=309, filesize=12.0 K 2024-11-07T14:18:15,918 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for a97b90143d56ce006ffcb227cc121b11 in 1291ms, sequenceid=309, compaction requested=true 2024-11-07T14:18:15,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:18:15,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:15,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-11-07T14:18:15,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-11-07T14:18:15,921 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-11-07T14:18:15,921 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6720 sec 2024-11-07T14:18:15,922 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 2.6760 sec 2024-11-07T14:18:16,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:16,011 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a97b90143d56ce006ffcb227cc121b11 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-07T14:18:16,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=A 2024-11-07T14:18:16,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:16,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=B 2024-11-07T14:18:16,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:16,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
a97b90143d56ce006ffcb227cc121b11, store=C 2024-11-07T14:18:16,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:16,022 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107f7076022701249afbb9f8a4a24bed844_a97b90143d56ce006ffcb227cc121b11 is 50, key is test_row_0/A:col10/1730989096010/Put/seqid=0 2024-11-07T14:18:16,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742025_1201 (size=14994) 2024-11-07T14:18:16,030 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:16,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989156027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:16,033 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:16,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989156030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:16,132 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:16,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989156131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:16,135 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:16,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989156134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:16,334 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:16,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989156333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:16,338 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:16,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989156336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:16,428 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:16,432 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107f7076022701249afbb9f8a4a24bed844_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107f7076022701249afbb9f8a4a24bed844_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:16,433 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/c2908e140bb94390ba8ff5b0abe70eed, store: [table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:16,434 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/c2908e140bb94390ba8ff5b0abe70eed is 175, key is test_row_0/A:col10/1730989096010/Put/seqid=0 2024-11-07T14:18:16,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742026_1202 (size=39949) 2024-11-07T14:18:16,638 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:16,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989156637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:16,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:16,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989156640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:16,839 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=332, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/c2908e140bb94390ba8ff5b0abe70eed 2024-11-07T14:18:16,846 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/3751b1a94971414289fb096ac71daa6f is 50, key is test_row_0/B:col10/1730989096010/Put/seqid=0 2024-11-07T14:18:16,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742027_1203 (size=12301) 2024-11-07T14:18:17,143 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:17,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33472 deadline: 1730989157141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:17,145 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:17,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33480 deadline: 1730989157144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:17,252 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/3751b1a94971414289fb096ac71daa6f 2024-11-07T14:18:17,260 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/7f0a70f39c4144198f0964c945fe46dc is 50, key is test_row_0/C:col10/1730989096010/Put/seqid=0 2024-11-07T14:18:17,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742028_1204 (size=12301) 2024-11-07T14:18:17,265 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/7f0a70f39c4144198f0964c945fe46dc 2024-11-07T14:18:17,269 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/c2908e140bb94390ba8ff5b0abe70eed as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/c2908e140bb94390ba8ff5b0abe70eed 2024-11-07T14:18:17,274 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/c2908e140bb94390ba8ff5b0abe70eed, entries=200, sequenceid=332, filesize=39.0 K 2024-11-07T14:18:17,275 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/3751b1a94971414289fb096ac71daa6f as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/3751b1a94971414289fb096ac71daa6f 2024-11-07T14:18:17,280 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/3751b1a94971414289fb096ac71daa6f, entries=150, sequenceid=332, filesize=12.0 K 2024-11-07T14:18:17,281 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/7f0a70f39c4144198f0964c945fe46dc as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/7f0a70f39c4144198f0964c945fe46dc 2024-11-07T14:18:17,285 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/7f0a70f39c4144198f0964c945fe46dc, entries=150, sequenceid=332, filesize=12.0 K 2024-11-07T14:18:17,286 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for a97b90143d56ce006ffcb227cc121b11 in 1275ms, sequenceid=332, compaction requested=true 2024-11-07T14:18:17,286 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:18:17,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a97b90143d56ce006ffcb227cc121b11:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:18:17,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:17,287 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:18:17,287 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:18:17,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a97b90143d56ce006ffcb227cc121b11:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:18:17,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:17,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a97b90143d56ce006ffcb227cc121b11:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:18:17,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:18:17,288 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49802 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:18:17,288 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 143113 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:18:17,288 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): a97b90143d56ce006ffcb227cc121b11/B is initiating minor compaction (all files) 2024-11-07T14:18:17,288 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): a97b90143d56ce006ffcb227cc121b11/A is initiating minor compaction (all files) 2024-11-07T14:18:17,288 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a97b90143d56ce006ffcb227cc121b11/B in TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:17,288 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a97b90143d56ce006ffcb227cc121b11/A in TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:17,289 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/6ed5bceddb8f4745a8d508176bff3912, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/176b17190846455fa419cda63f708b29, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/8af38f44d38742eeb76d160052c9133b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/3751b1a94971414289fb096ac71daa6f] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp, totalSize=48.6 K 2024-11-07T14:18:17,289 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/ce0c07d0aa1c465982503cc72fb314ac, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/aceef2d3b0f3444cbeba0a5a8fc7aed0, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/e5d241ed8b70472bb3ebb81eb8334f85, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/c2908e140bb94390ba8ff5b0abe70eed] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp, totalSize=139.8 K 2024-11-07T14:18:17,289 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
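The repeated RegionTooBusyException warnings above come from HRegion.checkResources rejecting Mutate RPCs while region a97b90143d56ce006ffcb227cc121b11 is over its memstore blocking limit (512.0 K here); writes are accepted again once the in-flight flushes drain the memstore. That blocking limit is the product of two standard settings. A minimal Java sketch follows, with hypothetical values chosen only to reproduce a 512 K limit; the actual test configuration is not shown in this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimitSketch {
        // Returns a configuration whose per-region blocking limit is
        // flush.size * block.multiplier = 128 K * 4 = 512 K (hypothetical, test-sized values).
        public static Configuration smallFlushConf() {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            return conf;
        }
    }
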
2024-11-07T14:18:17,289 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. files: [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/ce0c07d0aa1c465982503cc72fb314ac, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/aceef2d3b0f3444cbeba0a5a8fc7aed0, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/e5d241ed8b70472bb3ebb81eb8334f85, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/c2908e140bb94390ba8ff5b0abe70eed] 2024-11-07T14:18:17,289 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting ce0c07d0aa1c465982503cc72fb314ac, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1730989089088 2024-11-07T14:18:17,289 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ed5bceddb8f4745a8d508176bff3912, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1730989089088 2024-11-07T14:18:17,290 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting aceef2d3b0f3444cbeba0a5a8fc7aed0, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1730989091393 2024-11-07T14:18:17,290 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 176b17190846455fa419cda63f708b29, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1730989091393 2024-11-07T14:18:17,290 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting e5d241ed8b70472bb3ebb81eb8334f85, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1730989093241 2024-11-07T14:18:17,290 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 8af38f44d38742eeb76d160052c9133b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1730989093241 2024-11-07T14:18:17,290 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting c2908e140bb94390ba8ff5b0abe70eed, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1730989095387 2024-11-07T14:18:17,291 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 3751b1a94971414289fb096ac71daa6f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1730989095399 2024-11-07T14:18:17,303 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a97b90143d56ce006ffcb227cc121b11#B#compaction#164 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:18:17,303 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/7bd3993c146f42d8afdbc61d6431033c is 50, key is test_row_0/B:col10/1730989096010/Put/seqid=0 2024-11-07T14:18:17,308 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:17,318 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107bcdce3ed162e48cdb18e01d1a4fb6fc6_a97b90143d56ce006ffcb227cc121b11 store=[table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:17,321 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107bcdce3ed162e48cdb18e01d1a4fb6fc6_a97b90143d56ce006ffcb227cc121b11, store=[table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:17,321 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107bcdce3ed162e48cdb18e01d1a4fb6fc6_a97b90143d56ce006ffcb227cc121b11 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:17,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742029_1205 (size=13085) 2024-11-07T14:18:17,329 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/7bd3993c146f42d8afdbc61d6431033c as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/7bd3993c146f42d8afdbc61d6431033c 2024-11-07T14:18:17,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742030_1206 (size=4469) 2024-11-07T14:18:17,335 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a97b90143d56ce006ffcb227cc121b11/B of a97b90143d56ce006ffcb227cc121b11 into 7bd3993c146f42d8afdbc61d6431033c(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
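The SortedCompactionPolicy and ExploringCompactionPolicy entries above show each store offering 4 eligible files (0 compacting, 16 blocking) and the exploring algorithm selecting all 4 for a minor compaction. A short sketch of the standard settings that drive that selection; the values shown are common defaults and are assumptions, not values read from this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionSelectionSketch {
        public static Configuration compactionConf() {
            Configuration conf = HBaseConfiguration.create();
            conf.setInt("hbase.hstore.compaction.min", 3);        // fewest store files before a minor compaction is considered
            conf.setInt("hbase.hstore.compaction.max", 10);       // most files folded into a single compaction
            conf.setInt("hbase.hstore.blockingStoreFiles", 16);   // corresponds to the "16 blocking" figure logged above
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // size ratio used by the exploring policy
            return conf;
        }
    }
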
2024-11-07T14:18:17,335 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:18:17,335 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11., storeName=a97b90143d56ce006ffcb227cc121b11/B, priority=12, startTime=1730989097287; duration=0sec 2024-11-07T14:18:17,335 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:18:17,335 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a97b90143d56ce006ffcb227cc121b11:B 2024-11-07T14:18:17,335 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:18:17,337 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49802 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:18:17,337 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): a97b90143d56ce006ffcb227cc121b11/C is initiating minor compaction (all files) 2024-11-07T14:18:17,337 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a97b90143d56ce006ffcb227cc121b11/C in TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:17,337 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/a9479a07811547baad32926505f565a0, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/ab4d4915fb72493a8add4230c79c71a8, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/d4321d6f01ff4f9fb26fefacfbfbfd3d, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/7f0a70f39c4144198f0964c945fe46dc] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp, totalSize=48.6 K 2024-11-07T14:18:17,338 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting a9479a07811547baad32926505f565a0, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1730989089088 2024-11-07T14:18:17,338 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting ab4d4915fb72493a8add4230c79c71a8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1730989091393 2024-11-07T14:18:17,338 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting d4321d6f01ff4f9fb26fefacfbfbfd3d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=309, earliestPutTs=1730989093241 2024-11-07T14:18:17,339 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f0a70f39c4144198f0964c945fe46dc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1730989095399 2024-11-07T14:18:17,349 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a97b90143d56ce006ffcb227cc121b11#C#compaction#166 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:18:17,349 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/1a5f66da51134691a278f7c198478ab3 is 50, key is test_row_0/C:col10/1730989096010/Put/seqid=0 2024-11-07T14:18:17,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742031_1207 (size=13085) 2024-11-07T14:18:17,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-07T14:18:17,357 INFO [Thread-582 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-11-07T14:18:17,358 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:18:17,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-11-07T14:18:17,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-07T14:18:17,360 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:18:17,360 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:18:17,361 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:18:17,388 DEBUG [Thread-583 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x68f0be85 to 127.0.0.1:51818 2024-11-07T14:18:17,388 DEBUG [Thread-583 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:18:17,389 DEBUG [Thread-587 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1a52344f to 127.0.0.1:51818 2024-11-07T14:18:17,389 DEBUG [Thread-587 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:18:17,389 DEBUG [Thread-585 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x152377d4 to 127.0.0.1:51818 2024-11-07T14:18:17,389 DEBUG [Thread-585 {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:18:17,392 DEBUG [Thread-589 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x08ba8425 to 127.0.0.1:51818 2024-11-07T14:18:17,392 DEBUG [Thread-589 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:18:17,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-07T14:18:17,511 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:17,511 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-07T14:18:17,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:17,512 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing a97b90143d56ce006ffcb227cc121b11 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-07T14:18:17,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=A 2024-11-07T14:18:17,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:17,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=B 2024-11-07T14:18:17,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:17,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=C 2024-11-07T14:18:17,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:17,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107a5643913735249f2bda2974a49444478_a97b90143d56ce006ffcb227cc121b11 is 50, key is test_row_0/A:col10/1730989096025/Put/seqid=0 2024-11-07T14:18:17,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742032_1208 (size=12454) 2024-11-07T14:18:17,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-07T14:18:17,733 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
a97b90143d56ce006ffcb227cc121b11#A#compaction#165 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:18:17,734 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/dc404b5568e14e0484cf4c80321886c7 is 175, key is test_row_0/A:col10/1730989096010/Put/seqid=0 2024-11-07T14:18:17,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742033_1209 (size=32039) 2024-11-07T14:18:17,758 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/1a5f66da51134691a278f7c198478ab3 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/1a5f66da51134691a278f7c198478ab3 2024-11-07T14:18:17,762 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a97b90143d56ce006ffcb227cc121b11/C of a97b90143d56ce006ffcb227cc121b11 into 1a5f66da51134691a278f7c198478ab3(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:18:17,762 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:18:17,762 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11., storeName=a97b90143d56ce006ffcb227cc121b11/C, priority=12, startTime=1730989097287; duration=0sec 2024-11-07T14:18:17,762 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:17,762 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a97b90143d56ce006ffcb227cc121b11:C 2024-11-07T14:18:17,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:17,928 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107a5643913735249f2bda2974a49444478_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107a5643913735249f2bda2974a49444478_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:17,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/d683b6991d4847698c10f4b502ceaed6, store: [table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:17,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/d683b6991d4847698c10f4b502ceaed6 is 175, key is test_row_0/A:col10/1730989096025/Put/seqid=0 2024-11-07T14:18:17,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742034_1210 (size=31255) 2024-11-07T14:18:17,933 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=346, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/d683b6991d4847698c10f4b502ceaed6 2024-11-07T14:18:17,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/e01f9b2a82ca4145acd835b2ba0ed295 is 50, key is test_row_0/B:col10/1730989096025/Put/seqid=0 2024-11-07T14:18:17,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742035_1211 (size=12301) 2024-11-07T14:18:17,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-07T14:18:18,142 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/dc404b5568e14e0484cf4c80321886c7 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/dc404b5568e14e0484cf4c80321886c7 2024-11-07T14:18:18,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:18,145 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. as already flushing 2024-11-07T14:18:18,145 DEBUG [Thread-576 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7ebda6ad to 127.0.0.1:51818 2024-11-07T14:18:18,145 DEBUG [Thread-576 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:18:18,147 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a97b90143d56ce006ffcb227cc121b11/A of a97b90143d56ce006ffcb227cc121b11 into dc404b5568e14e0484cf4c80321886c7(size=31.3 K), total size for store is 31.3 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:18:18,147 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:18:18,147 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11., storeName=a97b90143d56ce006ffcb227cc121b11/A, priority=12, startTime=1730989097286; duration=0sec 2024-11-07T14:18:18,147 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:18,147 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a97b90143d56ce006ffcb227cc121b11:A 2024-11-07T14:18:18,162 DEBUG [Thread-580 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x465dc764 to 127.0.0.1:51818 2024-11-07T14:18:18,162 DEBUG [Thread-580 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:18:18,345 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=346 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/e01f9b2a82ca4145acd835b2ba0ed295 2024-11-07T14:18:18,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/7b7b24bb10fc4e09b1bbd26acb13956e is 50, key is test_row_0/C:col10/1730989096025/Put/seqid=0 2024-11-07T14:18:18,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742036_1212 (size=12301) 2024-11-07T14:18:18,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-07T14:18:18,756 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=346 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/7b7b24bb10fc4e09b1bbd26acb13956e 2024-11-07T14:18:18,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/d683b6991d4847698c10f4b502ceaed6 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/d683b6991d4847698c10f4b502ceaed6 2024-11-07T14:18:18,765 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/d683b6991d4847698c10f4b502ceaed6, entries=150, sequenceid=346, filesize=30.5 K 2024-11-07T14:18:18,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/e01f9b2a82ca4145acd835b2ba0ed295 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/e01f9b2a82ca4145acd835b2ba0ed295 2024-11-07T14:18:18,769 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/e01f9b2a82ca4145acd835b2ba0ed295, entries=150, sequenceid=346, filesize=12.0 K 2024-11-07T14:18:18,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/7b7b24bb10fc4e09b1bbd26acb13956e as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/7b7b24bb10fc4e09b1bbd26acb13956e 2024-11-07T14:18:18,773 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/7b7b24bb10fc4e09b1bbd26acb13956e, entries=150, sequenceid=346, filesize=12.0 K 2024-11-07T14:18:18,773 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=13.42 KB/13740 for a97b90143d56ce006ffcb227cc121b11 in 1261ms, sequenceid=346, compaction requested=false 2024-11-07T14:18:18,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:18:18,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
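Family A in this region is flushed and compacted through the MOB path (DefaultMobStoreFlusher and DefaultMobStoreCompactor writing under mobdir/), while B and C go through the default store flusher. A hedged sketch of how a MOB-enabled family is typically declared with the HBase 2.x client API; the 100-byte threshold is a hypothetical illustration, not a value taken from this test:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilySketch {
        // Cells in family A larger than the threshold are written as MOB files
        // under the mobdir/ paths seen in the flush and compaction entries above.
        public static ColumnFamilyDescriptor mobFamilyA() {
            return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                    .setMobEnabled(true)
                    .setMobThreshold(100L) // hypothetical threshold in bytes
                    .build();
        }
    }
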
2024-11-07T14:18:18,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-11-07T14:18:18,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-11-07T14:18:18,776 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-11-07T14:18:18,776 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4150 sec 2024-11-07T14:18:18,777 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 1.4180 sec 2024-11-07T14:18:19,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-07T14:18:19,464 INFO [Thread-582 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-11-07T14:18:19,562 DEBUG [Thread-578 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x19a533a3 to 127.0.0.1:51818 2024-11-07T14:18:19,562 DEBUG [Thread-578 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:18:19,583 DEBUG [Thread-574 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7bad2e85 to 127.0.0.1:51818 2024-11-07T14:18:19,583 DEBUG [Thread-574 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:18:19,590 DEBUG [Thread-572 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7362d978 to 127.0.0.1:51818 2024-11-07T14:18:19,590 DEBUG [Thread-572 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:18:19,590 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-07T14:18:19,590 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 50 2024-11-07T14:18:19,590 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 50 2024-11-07T14:18:19,590 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 67 2024-11-07T14:18:19,590 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 51 2024-11-07T14:18:19,590 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 55 2024-11-07T14:18:19,590 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-07T14:18:19,590 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7109 2024-11-07T14:18:19,590 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7419 2024-11-07T14:18:19,590 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-07T14:18:19,590 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3085 2024-11-07T14:18:19,590 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9255 rows 2024-11-07T14:18:19,591 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3069 2024-11-07T14:18:19,591 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9203 rows 2024-11-07T14:18:19,591 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-07T14:18:19,591 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x04ddf4c3 to 127.0.0.1:51818 2024-11-07T14:18:19,591 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:18:19,593 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-07T14:18:19,594 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-07T14:18:19,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-07T14:18:19,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-07T14:18:19,597 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730989099597"}]},"ts":"1730989099597"} 2024-11-07T14:18:19,598 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-07T14:18:19,602 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-07T14:18:19,602 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-07T14:18:19,603 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=61, ppid=60, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a97b90143d56ce006ffcb227cc121b11, UNASSIGN}] 2024-11-07T14:18:19,604 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=61, ppid=60, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=a97b90143d56ce006ffcb227cc121b11, UNASSIGN 2024-11-07T14:18:19,604 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=61 updating hbase:meta row=a97b90143d56ce006ffcb227cc121b11, regionState=CLOSING, regionLocation=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:19,605 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-07T14:18:19,605 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; CloseRegionProcedure a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081}] 2024-11-07T14:18:19,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-07T14:18:19,756 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:19,757 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] handler.UnassignRegionHandler(124): Close a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:19,757 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-07T14:18:19,757 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1681): Closing a97b90143d56ce006ffcb227cc121b11, disabling compactions & flushes 2024-11-07T14:18:19,757 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:19,757 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 2024-11-07T14:18:19,757 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. after waiting 0 ms 2024-11-07T14:18:19,757 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
2024-11-07T14:18:19,757 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(2837): Flushing a97b90143d56ce006ffcb227cc121b11 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-07T14:18:19,757 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=A 2024-11-07T14:18:19,757 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:19,757 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=B 2024-11-07T14:18:19,757 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:19,757 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a97b90143d56ce006ffcb227cc121b11, store=C 2024-11-07T14:18:19,757 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:19,763 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411070f1bb190fe6d43b992d699745e4097d3_a97b90143d56ce006ffcb227cc121b11 is 50, key is test_row_0/A:col10/1730989099589/Put/seqid=0 2024-11-07T14:18:19,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742037_1213 (size=12454) 2024-11-07T14:18:19,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-07T14:18:20,168 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:20,172 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411070f1bb190fe6d43b992d699745e4097d3_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411070f1bb190fe6d43b992d699745e4097d3_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:20,173 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/179268ed77164941be12572c9dd09243, store: [table=TestAcidGuarantees family=A region=a97b90143d56ce006ffcb227cc121b11] 2024-11-07T14:18:20,173 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/179268ed77164941be12572c9dd09243 is 175, key is test_row_0/A:col10/1730989099589/Put/seqid=0 2024-11-07T14:18:20,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742038_1214 (size=31255) 2024-11-07T14:18:20,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-07T14:18:20,578 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=356, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/179268ed77164941be12572c9dd09243 2024-11-07T14:18:20,584 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/80523eb709034d1db562254dffa2398f is 50, key is test_row_0/B:col10/1730989099589/Put/seqid=0 2024-11-07T14:18:20,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742039_1215 (size=12301) 2024-11-07T14:18:20,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-07T14:18:20,989 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/80523eb709034d1db562254dffa2398f 2024-11-07T14:18:20,996 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/c368e16011b042ca92eed7ce6750fd7d is 50, key is test_row_0/C:col10/1730989099589/Put/seqid=0 2024-11-07T14:18:20,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742040_1216 (size=12301) 2024-11-07T14:18:21,400 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=356 (bloomFilter=true), 
to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/c368e16011b042ca92eed7ce6750fd7d 2024-11-07T14:18:21,404 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/A/179268ed77164941be12572c9dd09243 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/179268ed77164941be12572c9dd09243 2024-11-07T14:18:21,408 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/179268ed77164941be12572c9dd09243, entries=150, sequenceid=356, filesize=30.5 K 2024-11-07T14:18:21,408 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/B/80523eb709034d1db562254dffa2398f as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/80523eb709034d1db562254dffa2398f 2024-11-07T14:18:21,412 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/80523eb709034d1db562254dffa2398f, entries=150, sequenceid=356, filesize=12.0 K 2024-11-07T14:18:21,412 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/.tmp/C/c368e16011b042ca92eed7ce6750fd7d as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/c368e16011b042ca92eed7ce6750fd7d 2024-11-07T14:18:21,416 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/c368e16011b042ca92eed7ce6750fd7d, entries=150, sequenceid=356, filesize=12.0 K 2024-11-07T14:18:21,416 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for a97b90143d56ce006ffcb227cc121b11 in 1659ms, sequenceid=356, compaction requested=true 2024-11-07T14:18:21,417 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/b4729696b1e24a6a9ed6287989dfd441, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/918fd67798a34102b3cc3b60a646bd73, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/a4edbfffbc654f2b8db5b490e8cd5d27, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/2afa080b5c3846f6885f5b92321d0b86, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/82f91c3ffb4147d5962201b394e03f49, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/6b43dc06adc84b4085a0ae43ba3bae0e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/2b258bd51ae54bf28373e660683a46ab, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/b1888a3489f64d258697ca93a9fc36a9, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/7ca76dd3a50b41dd8229f80a8a81d319, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/e9c21dfa420a415fa8d85a77aa53002a, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/aad6d324472245a78a31f5662e9ab6c5, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/f6fd37c8cbdd4791bb09143adee444a0, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/da88539f76b34b56b1a08dc53c64d35d, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/8c195013a54a4c35ba6b32d556f2f146, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/545ab18391ca44b1ab4ca07e653fbfe9, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/2c2f63bc2a1c4abd9513f216a7d94339, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/15347fa45d594b86b03d12c517afc79c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/597f60ba9472488995287c8b129415e6, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/fb4e33decdec421db6942b2725b8ff48, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/ce0c07d0aa1c465982503cc72fb314ac, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/aceef2d3b0f3444cbeba0a5a8fc7aed0, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/e5d241ed8b70472bb3ebb81eb8334f85, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/c2908e140bb94390ba8ff5b0abe70eed] to archive 2024-11-07T14:18:21,418 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-07T14:18:21,420 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/b4729696b1e24a6a9ed6287989dfd441 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/b4729696b1e24a6a9ed6287989dfd441 2024-11-07T14:18:21,421 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/918fd67798a34102b3cc3b60a646bd73 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/918fd67798a34102b3cc3b60a646bd73 2024-11-07T14:18:21,422 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/a4edbfffbc654f2b8db5b490e8cd5d27 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/a4edbfffbc654f2b8db5b490e8cd5d27 2024-11-07T14:18:21,423 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/2afa080b5c3846f6885f5b92321d0b86 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/2afa080b5c3846f6885f5b92321d0b86 2024-11-07T14:18:21,424 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/82f91c3ffb4147d5962201b394e03f49 to 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/82f91c3ffb4147d5962201b394e03f49 2024-11-07T14:18:21,425 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/6b43dc06adc84b4085a0ae43ba3bae0e to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/6b43dc06adc84b4085a0ae43ba3bae0e 2024-11-07T14:18:21,426 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/2b258bd51ae54bf28373e660683a46ab to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/2b258bd51ae54bf28373e660683a46ab 2024-11-07T14:18:21,427 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/b1888a3489f64d258697ca93a9fc36a9 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/b1888a3489f64d258697ca93a9fc36a9 2024-11-07T14:18:21,428 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/7ca76dd3a50b41dd8229f80a8a81d319 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/7ca76dd3a50b41dd8229f80a8a81d319 2024-11-07T14:18:21,429 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/e9c21dfa420a415fa8d85a77aa53002a to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/e9c21dfa420a415fa8d85a77aa53002a 2024-11-07T14:18:21,430 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/aad6d324472245a78a31f5662e9ab6c5 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/aad6d324472245a78a31f5662e9ab6c5 2024-11-07T14:18:21,431 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/f6fd37c8cbdd4791bb09143adee444a0 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/f6fd37c8cbdd4791bb09143adee444a0 2024-11-07T14:18:21,432 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/da88539f76b34b56b1a08dc53c64d35d to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/da88539f76b34b56b1a08dc53c64d35d 2024-11-07T14:18:21,433 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/8c195013a54a4c35ba6b32d556f2f146 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/8c195013a54a4c35ba6b32d556f2f146 2024-11-07T14:18:21,434 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/545ab18391ca44b1ab4ca07e653fbfe9 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/545ab18391ca44b1ab4ca07e653fbfe9 2024-11-07T14:18:21,436 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/2c2f63bc2a1c4abd9513f216a7d94339 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/2c2f63bc2a1c4abd9513f216a7d94339 2024-11-07T14:18:21,437 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/15347fa45d594b86b03d12c517afc79c to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/15347fa45d594b86b03d12c517afc79c 2024-11-07T14:18:21,438 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/597f60ba9472488995287c8b129415e6 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/597f60ba9472488995287c8b129415e6 2024-11-07T14:18:21,439 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/fb4e33decdec421db6942b2725b8ff48 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/fb4e33decdec421db6942b2725b8ff48 2024-11-07T14:18:21,440 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/ce0c07d0aa1c465982503cc72fb314ac to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/ce0c07d0aa1c465982503cc72fb314ac 2024-11-07T14:18:21,441 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/aceef2d3b0f3444cbeba0a5a8fc7aed0 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/aceef2d3b0f3444cbeba0a5a8fc7aed0 2024-11-07T14:18:21,442 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/e5d241ed8b70472bb3ebb81eb8334f85 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/e5d241ed8b70472bb3ebb81eb8334f85 2024-11-07T14:18:21,443 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/c2908e140bb94390ba8ff5b0abe70eed to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/c2908e140bb94390ba8ff5b0abe70eed 2024-11-07T14:18:21,445 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/a84a6a641b584866ae4945bb123c29cd, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/3a1e6b8bcce445328d5067dcb1276524, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/e79b3572b5664850abfb5438787be9a4, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/d4680b443a03492c9d561f463dad31af, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/c4f019cfd3454843ae131965ea75dead, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/f4793cac7f9546acb82d2eefd8f93d81, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/c037a8538adb488f9764fc97f809e86f, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/cd41cd2056f3468284ac77e25ee9e8b3, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/2563e4b7263149c3a22fe8b0fe03fad4, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/3b08ada48672414eae6989d046ead0db, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/3193e0ad222a4bdcbe3812eac9284210, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/1deaa270837646a0a780ecb13dfe4faa, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/ea06fa46489e49e8a070bd7c95d5bf13, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/c2d337a4a2b4493db2da3b44f8c057c6, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/bb9040b364774daf8f4b818c9577e678, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/cf867d8c85564f72ace51ad14edbd36d, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/e3996afe2f0f485591029d02a1c76663, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/f41a275edd924a948b4c87502d46e113, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/6ed5bceddb8f4745a8d508176bff3912, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/c371e8d977dd42d6b557b8955a4e84c1, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/176b17190846455fa419cda63f708b29, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/8af38f44d38742eeb76d160052c9133b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/3751b1a94971414289fb096ac71daa6f] to archive 2024-11-07T14:18:21,445 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-07T14:18:21,447 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/a84a6a641b584866ae4945bb123c29cd to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/a84a6a641b584866ae4945bb123c29cd 2024-11-07T14:18:21,448 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/3a1e6b8bcce445328d5067dcb1276524 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/3a1e6b8bcce445328d5067dcb1276524 2024-11-07T14:18:21,449 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/e79b3572b5664850abfb5438787be9a4 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/e79b3572b5664850abfb5438787be9a4 2024-11-07T14:18:21,450 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/d4680b443a03492c9d561f463dad31af to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/d4680b443a03492c9d561f463dad31af 2024-11-07T14:18:21,451 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/c4f019cfd3454843ae131965ea75dead to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/c4f019cfd3454843ae131965ea75dead 2024-11-07T14:18:21,452 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/f4793cac7f9546acb82d2eefd8f93d81 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/f4793cac7f9546acb82d2eefd8f93d81 2024-11-07T14:18:21,453 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/c037a8538adb488f9764fc97f809e86f to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/c037a8538adb488f9764fc97f809e86f 2024-11-07T14:18:21,454 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/cd41cd2056f3468284ac77e25ee9e8b3 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/cd41cd2056f3468284ac77e25ee9e8b3 2024-11-07T14:18:21,455 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/2563e4b7263149c3a22fe8b0fe03fad4 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/2563e4b7263149c3a22fe8b0fe03fad4 2024-11-07T14:18:21,456 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/3b08ada48672414eae6989d046ead0db to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/3b08ada48672414eae6989d046ead0db 2024-11-07T14:18:21,457 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/3193e0ad222a4bdcbe3812eac9284210 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/3193e0ad222a4bdcbe3812eac9284210 2024-11-07T14:18:21,458 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/1deaa270837646a0a780ecb13dfe4faa to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/1deaa270837646a0a780ecb13dfe4faa 2024-11-07T14:18:21,459 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/ea06fa46489e49e8a070bd7c95d5bf13 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/ea06fa46489e49e8a070bd7c95d5bf13 2024-11-07T14:18:21,460 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/c2d337a4a2b4493db2da3b44f8c057c6 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/c2d337a4a2b4493db2da3b44f8c057c6 2024-11-07T14:18:21,460 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/bb9040b364774daf8f4b818c9577e678 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/bb9040b364774daf8f4b818c9577e678 2024-11-07T14:18:21,461 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/cf867d8c85564f72ace51ad14edbd36d to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/cf867d8c85564f72ace51ad14edbd36d 2024-11-07T14:18:21,462 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/e3996afe2f0f485591029d02a1c76663 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/e3996afe2f0f485591029d02a1c76663 2024-11-07T14:18:21,463 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/f41a275edd924a948b4c87502d46e113 to 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/f41a275edd924a948b4c87502d46e113 2024-11-07T14:18:21,464 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/6ed5bceddb8f4745a8d508176bff3912 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/6ed5bceddb8f4745a8d508176bff3912 2024-11-07T14:18:21,465 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/c371e8d977dd42d6b557b8955a4e84c1 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/c371e8d977dd42d6b557b8955a4e84c1 2024-11-07T14:18:21,466 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/176b17190846455fa419cda63f708b29 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/176b17190846455fa419cda63f708b29 2024-11-07T14:18:21,467 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/8af38f44d38742eeb76d160052c9133b to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/8af38f44d38742eeb76d160052c9133b 2024-11-07T14:18:21,468 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/3751b1a94971414289fb096ac71daa6f to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/3751b1a94971414289fb096ac71daa6f 2024-11-07T14:18:21,469 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/7e763f5e272c445a98de282bf10d07fe, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/e587b520efa24be6acefce6f33d6f1df, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/37d3f840407746548aced53860f1db69, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/b77db89c788247369d3dfca2c92a695b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/1062487ac65b4c9d91c6625cdff4d3ed, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/01bf186612c74e11a385026af0ffb450, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/3453119d0a3c4c0f8f248f0ea80c0561, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/3c3e6583cd12416a9cc1a9c85eefb87a, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/04abcf52db7345198ed2d9943844edd7, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/327acd3581d14b6385fd5aa542350ff1, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/1e674eb4b7a840e3b215933c67600a5a, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/116853124a9240f99f04114b0b487469, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/05606eb2b42c4fc194c07626ffe0a518, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/455e92903e0a4db681683c0176917b0b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/55c971ce7d6d4b5e81c521dbf32826d9, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/0adf09160d894b20a1ed663291a164b4, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/0f150e66ed8249b9a733aeb8b2f326ec, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/7e9c1f0a181c4d3e98b266df0218b6d0, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/a9479a07811547baad32926505f565a0, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/8d4fc18342e64bb8b4e7a93e56544381, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/ab4d4915fb72493a8add4230c79c71a8, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/d4321d6f01ff4f9fb26fefacfbfbfd3d, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/7f0a70f39c4144198f0964c945fe46dc] to archive 2024-11-07T14:18:21,470 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-07T14:18:21,472 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/7e763f5e272c445a98de282bf10d07fe to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/7e763f5e272c445a98de282bf10d07fe 2024-11-07T14:18:21,473 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/e587b520efa24be6acefce6f33d6f1df to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/e587b520efa24be6acefce6f33d6f1df 2024-11-07T14:18:21,474 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/37d3f840407746548aced53860f1db69 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/37d3f840407746548aced53860f1db69 2024-11-07T14:18:21,475 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/b77db89c788247369d3dfca2c92a695b to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/b77db89c788247369d3dfca2c92a695b 2024-11-07T14:18:21,476 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/1062487ac65b4c9d91c6625cdff4d3ed to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/1062487ac65b4c9d91c6625cdff4d3ed 2024-11-07T14:18:21,477 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/01bf186612c74e11a385026af0ffb450 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/01bf186612c74e11a385026af0ffb450 2024-11-07T14:18:21,477 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/3453119d0a3c4c0f8f248f0ea80c0561 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/3453119d0a3c4c0f8f248f0ea80c0561 2024-11-07T14:18:21,478 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/3c3e6583cd12416a9cc1a9c85eefb87a to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/3c3e6583cd12416a9cc1a9c85eefb87a 2024-11-07T14:18:21,479 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/04abcf52db7345198ed2d9943844edd7 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/04abcf52db7345198ed2d9943844edd7 2024-11-07T14:18:21,480 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/327acd3581d14b6385fd5aa542350ff1 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/327acd3581d14b6385fd5aa542350ff1 2024-11-07T14:18:21,481 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/1e674eb4b7a840e3b215933c67600a5a to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/1e674eb4b7a840e3b215933c67600a5a 2024-11-07T14:18:21,482 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/116853124a9240f99f04114b0b487469 to 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/116853124a9240f99f04114b0b487469 2024-11-07T14:18:21,483 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/05606eb2b42c4fc194c07626ffe0a518 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/05606eb2b42c4fc194c07626ffe0a518 2024-11-07T14:18:21,484 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/455e92903e0a4db681683c0176917b0b to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/455e92903e0a4db681683c0176917b0b 2024-11-07T14:18:21,485 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/55c971ce7d6d4b5e81c521dbf32826d9 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/55c971ce7d6d4b5e81c521dbf32826d9 2024-11-07T14:18:21,486 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/0adf09160d894b20a1ed663291a164b4 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/0adf09160d894b20a1ed663291a164b4 2024-11-07T14:18:21,487 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/0f150e66ed8249b9a733aeb8b2f326ec to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/0f150e66ed8249b9a733aeb8b2f326ec 2024-11-07T14:18:21,488 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/7e9c1f0a181c4d3e98b266df0218b6d0 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/7e9c1f0a181c4d3e98b266df0218b6d0 2024-11-07T14:18:21,489 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/a9479a07811547baad32926505f565a0 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/a9479a07811547baad32926505f565a0 2024-11-07T14:18:21,490 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/8d4fc18342e64bb8b4e7a93e56544381 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/8d4fc18342e64bb8b4e7a93e56544381 2024-11-07T14:18:21,491 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/ab4d4915fb72493a8add4230c79c71a8 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/ab4d4915fb72493a8add4230c79c71a8 2024-11-07T14:18:21,492 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/d4321d6f01ff4f9fb26fefacfbfbfd3d to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/d4321d6f01ff4f9fb26fefacfbfbfd3d 2024-11-07T14:18:21,493 DEBUG [StoreCloser-TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/7f0a70f39c4144198f0964c945fe46dc to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/7f0a70f39c4144198f0964c945fe46dc 2024-11-07T14:18:21,497 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/recovered.edits/359.seqid, newMaxSeqId=359, maxSeqId=4 2024-11-07T14:18:21,498 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11. 
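[editorial sketch, not part of the captured log] The HFileArchiver DEBUG entries above all follow the same pattern: a store file under the table's data directory is moved to the matching location under archive/, keeping the namespace/table/region/column-family layout and the file name. A minimal illustration of that mapping, assuming paths shaped like the ones in this log, is:

```java
// Hedged sketch of the path rewrite the "Archived from FileableStoreFile" lines show.
// The "/data/default/" marker is an assumption based on this log's default-namespace paths.
static String toArchivePath(String storeFilePath) {
  // e.g. .../682293a0-.../data/default/TestAcidGuarantees/<region>/<cf>/<file>
  //   -> .../682293a0-.../archive/data/default/TestAcidGuarantees/<region>/<cf>/<file>
  return storeFilePath.replaceFirst("/data/default/", "/archive/data/default/");
}
```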
2024-11-07T14:18:21,498 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1635): Region close journal for a97b90143d56ce006ffcb227cc121b11: 2024-11-07T14:18:21,499 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] handler.UnassignRegionHandler(170): Closed a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:21,500 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=61 updating hbase:meta row=a97b90143d56ce006ffcb227cc121b11, regionState=CLOSED 2024-11-07T14:18:21,502 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-11-07T14:18:21,502 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; CloseRegionProcedure a97b90143d56ce006ffcb227cc121b11, server=69430dbfd73f,45917,1730989044081 in 1.8950 sec 2024-11-07T14:18:21,503 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=61, resume processing ppid=60 2024-11-07T14:18:21,503 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, ppid=60, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a97b90143d56ce006ffcb227cc121b11, UNASSIGN in 1.8990 sec 2024-11-07T14:18:21,504 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-11-07T14:18:21,504 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9010 sec 2024-11-07T14:18:21,505 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730989101505"}]},"ts":"1730989101505"} 2024-11-07T14:18:21,506 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-07T14:18:21,509 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-07T14:18:21,510 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9150 sec 2024-11-07T14:18:21,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-07T14:18:21,700 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-11-07T14:18:21,701 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-07T14:18:21,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:18:21,702 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=63, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:18:21,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-07T14:18:21,703 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for 
pid=63, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:18:21,705 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:21,707 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A, FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B, FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C, FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/recovered.edits] 2024-11-07T14:18:21,710 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/179268ed77164941be12572c9dd09243 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/179268ed77164941be12572c9dd09243 2024-11-07T14:18:21,711 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/d683b6991d4847698c10f4b502ceaed6 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/d683b6991d4847698c10f4b502ceaed6 2024-11-07T14:18:21,712 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/dc404b5568e14e0484cf4c80321886c7 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/A/dc404b5568e14e0484cf4c80321886c7 2024-11-07T14:18:21,714 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/7bd3993c146f42d8afdbc61d6431033c to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/7bd3993c146f42d8afdbc61d6431033c 2024-11-07T14:18:21,715 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/80523eb709034d1db562254dffa2398f to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/80523eb709034d1db562254dffa2398f 2024-11-07T14:18:21,717 
DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/e01f9b2a82ca4145acd835b2ba0ed295 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/B/e01f9b2a82ca4145acd835b2ba0ed295 2024-11-07T14:18:21,719 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/1a5f66da51134691a278f7c198478ab3 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/1a5f66da51134691a278f7c198478ab3 2024-11-07T14:18:21,720 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/7b7b24bb10fc4e09b1bbd26acb13956e to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/7b7b24bb10fc4e09b1bbd26acb13956e 2024-11-07T14:18:21,721 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/c368e16011b042ca92eed7ce6750fd7d to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/C/c368e16011b042ca92eed7ce6750fd7d 2024-11-07T14:18:21,724 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/recovered.edits/359.seqid to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11/recovered.edits/359.seqid 2024-11-07T14:18:21,724 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:21,725 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-07T14:18:21,725 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-07T14:18:21,726 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-07T14:18:21,729 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411070eb6c7990f324feaa73ed9f6de2e3481_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411070eb6c7990f324feaa73ed9f6de2e3481_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:21,730 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411070f1bb190fe6d43b992d699745e4097d3_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411070f1bb190fe6d43b992d699745e4097d3_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:21,732 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110710355fe471ca4f66a91415da88b2d99a_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110710355fe471ca4f66a91415da88b2d99a_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:21,733 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110713dce815a3b840218c5e9898c81269a5_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110713dce815a3b840218c5e9898c81269a5_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:21,734 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110724faf90fe12d4a7dbb5aaa07d63e86de_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110724faf90fe12d4a7dbb5aaa07d63e86de_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:21,735 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110741d06897ae604a5086d7112cdad68e39_a97b90143d56ce006ffcb227cc121b11 to 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110741d06897ae604a5086d7112cdad68e39_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:21,736 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411075529d23274404cdf94d3807315cba977_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411075529d23274404cdf94d3807315cba977_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:21,737 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110763fc0b12dbec4013aa5068e5934ce483_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110763fc0b12dbec4013aa5068e5934ce483_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:21,738 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411077f7573e80eaa495abf904cbc3c8c5b87_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411077f7573e80eaa495abf904cbc3c8c5b87_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:21,739 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411078bf82944f706417d80f8729b225895f4_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411078bf82944f706417d80f8729b225895f4_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:21,741 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110796f0be9ef2914e23a81f0dbebb1b282d_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110796f0be9ef2914e23a81f0dbebb1b282d_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:21,742 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107a5643913735249f2bda2974a49444478_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107a5643913735249f2bda2974a49444478_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:21,743 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107aadbb56d6e7e496da127838f6f7e864a_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107aadbb56d6e7e496da127838f6f7e864a_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:21,744 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107b29143f742bb4682959ebd77f2a4a5a4_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107b29143f742bb4682959ebd77f2a4a5a4_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:21,745 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107d4880ccb5bcd4cf2a7b47c911aa7a11c_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107d4880ccb5bcd4cf2a7b47c911aa7a11c_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:21,746 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107dffab3a315e845b3a9802cc47e2c9ef6_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107dffab3a315e845b3a9802cc47e2c9ef6_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:21,748 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107f236d594be1c4ec78cc9c1c4ad6356c1_a97b90143d56ce006ffcb227cc121b11 to 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107f236d594be1c4ec78cc9c1c4ad6356c1_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:21,749 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107f7076022701249afbb9f8a4a24bed844_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107f7076022701249afbb9f8a4a24bed844_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:21,750 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107f746306dd2604ab0be33b1ed56842751_a97b90143d56ce006ffcb227cc121b11 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107f746306dd2604ab0be33b1ed56842751_a97b90143d56ce006ffcb227cc121b11 2024-11-07T14:18:21,751 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-07T14:18:21,753 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=63, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:18:21,755 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-07T14:18:21,757 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-07T14:18:21,758 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=63, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:18:21,758 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-07T14:18:21,758 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1730989101758"}]},"ts":"9223372036854775807"} 2024-11-07T14:18:21,760 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-07T14:18:21,760 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => a97b90143d56ce006ffcb227cc121b11, NAME => 'TestAcidGuarantees,,1730989074265.a97b90143d56ce006ffcb227cc121b11.', STARTKEY => '', ENDKEY => ''}] 2024-11-07T14:18:21,760 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-11-07T14:18:21,760 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1730989101760"}]},"ts":"9223372036854775807"} 2024-11-07T14:18:21,762 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-07T14:18:21,764 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=63, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:18:21,765 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 63 msec 2024-11-07T14:18:21,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-07T14:18:21,804 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-11-07T14:18:21,814 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobMixedAtomicity Thread=245 (was 238) Potentially hanging thread: hconnection-0x10f14607-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_46215851_22 at /127.0.0.1:35282 [Waiting for operation #920] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x10f14607-shared-pool-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1443265777_22 at /127.0.0.1:35298 [Waiting for operation #941] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1443265777_22 at /127.0.0.1:40022 [Waiting for operation #1381] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/cluster_f3a4e477-7868-e1b1-52ac-86c06ee32671/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_46215851_22 at /127.0.0.1:37530 [Waiting for operation #1336] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/cluster_f3a4e477-7868-e1b1-52ac-86c06ee32671/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x10f14607-shared-pool-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x10f14607-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=462 (was 455) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=433 (was 436), ProcessCount=11 (was 11), AvailableMemoryMB=6023 (was 6283) 2024-11-07T14:18:21,822 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testGetAtomicity Thread=245, OpenFileDescriptor=462, MaxFileDescriptor=1048576, SystemLoadAverage=433, ProcessCount=11, AvailableMemoryMB=6023 2024-11-07T14:18:21,824 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-07T14:18:21,824 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T14:18:21,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=64, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-07T14:18:21,826 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-07T14:18:21,826 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:21,826 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 64 2024-11-07T14:18:21,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-07T14:18:21,826 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-07T14:18:21,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742041_1217 (size=960) 2024-11-07T14:18:21,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-07T14:18:22,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-07T14:18:22,234 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8 2024-11-07T14:18:22,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742042_1218 (size=53) 2024-11-07T14:18:22,404 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-07T14:18:22,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-07T14:18:22,640 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T14:18:22,640 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing d9f2a49b2fc894490b8d6f930ab5fe35, disabling compactions & flushes 2024-11-07T14:18:22,640 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:22,640 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:22,640 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. after waiting 0 ms 2024-11-07T14:18:22,640 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:22,640 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
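The create request above specifies three column families (A, B, C), one version each, ROW bloom filters, 64 KB blocks, and the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC'. A minimal client-side sketch that would produce an equivalent descriptor, assuming the standard Admin API (the test's helper code is not visible in this log):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableSketch {
  static void createTable(Admin admin) throws java.io.IOException {
    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // table-level metadata attribute seen in the create request above
            .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
    for (String family : new String[] { "A", "B", "C" }) {
      ColumnFamilyDescriptor cfd =
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)                 // VERSIONS => '1'
              .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
              .setBlocksize(65536)               // BLOCKSIZE => 64 KB
              .build();
      builder.setColumnFamily(cfd);
    }
    // The master stores this request as a CreateTableProcedure (pid=64 above).
    admin.createTable(builder.build());
  }
}
```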
2024-11-07T14:18:22,640 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:22,641 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-07T14:18:22,642 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1730989102642"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730989102642"}]},"ts":"1730989102642"} 2024-11-07T14:18:22,643 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-07T14:18:22,644 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-07T14:18:22,644 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730989102644"}]},"ts":"1730989102644"} 2024-11-07T14:18:22,644 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-07T14:18:22,648 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d9f2a49b2fc894490b8d6f930ab5fe35, ASSIGN}] 2024-11-07T14:18:22,649 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d9f2a49b2fc894490b8d6f930ab5fe35, ASSIGN 2024-11-07T14:18:22,649 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=d9f2a49b2fc894490b8d6f930ab5fe35, ASSIGN; state=OFFLINE, location=69430dbfd73f,45917,1730989044081; forceNewPlan=false, retain=false 2024-11-07T14:18:22,800 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=d9f2a49b2fc894490b8d6f930ab5fe35, regionState=OPENING, regionLocation=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:22,801 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; OpenRegionProcedure d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081}] 2024-11-07T14:18:22,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-07T14:18:22,952 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:22,955 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
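While the CreateTableProcedure (pid=64) walks through its states and spawns the ASSIGN child (pid=65) and the OpenRegionProcedure (pid=66) above, the client keeps asking the master whether the procedure has finished, which is what the repeated "Checking to see if procedure is done pid=64" lines are. A minimal sketch of that client-side view, assuming the asynchronous Admin API (the blocking createTable call performs the same wait internally):

```java
import java.util.concurrent.Future;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class CreateAndWaitSketch {
  static void createAndWait(Admin admin, TableDescriptor descriptor) throws Exception {
    // Submits the create; the master runs it as a procedure (pid=64 in this log).
    Future<Void> pending = admin.createTableAsync(descriptor);
    // Blocks until the whole chain (pid=64 -> 65 -> 66 here) completes; while
    // waiting, the client's polling produces the "Checking to see if procedure
    // is done" entries logged by MasterRpcServices above.
    pending.get();
    assert admin.isTableAvailable(TableName.valueOf("TestAcidGuarantees"));
  }
}
```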
2024-11-07T14:18:22,955 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7285): Opening region: {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} 2024-11-07T14:18:22,956 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:22,956 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T14:18:22,956 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7327): checking encryption for d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:22,956 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7330): checking classloading for d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:22,957 INFO [StoreOpener-d9f2a49b2fc894490b8d6f930ab5fe35-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:22,958 INFO [StoreOpener-d9f2a49b2fc894490b8d6f930ab5fe35-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T14:18:22,959 INFO [StoreOpener-d9f2a49b2fc894490b8d6f930ab5fe35-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d9f2a49b2fc894490b8d6f930ab5fe35 columnFamilyName A 2024-11-07T14:18:22,959 DEBUG [StoreOpener-d9f2a49b2fc894490b8d6f930ab5fe35-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:22,959 INFO [StoreOpener-d9f2a49b2fc894490b8d6f930ab5fe35-1 {}] regionserver.HStore(327): Store=d9f2a49b2fc894490b8d6f930ab5fe35/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T14:18:22,959 INFO [StoreOpener-d9f2a49b2fc894490b8d6f930ab5fe35-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:22,960 INFO [StoreOpener-d9f2a49b2fc894490b8d6f930ab5fe35-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T14:18:22,960 INFO [StoreOpener-d9f2a49b2fc894490b8d6f930ab5fe35-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d9f2a49b2fc894490b8d6f930ab5fe35 columnFamilyName B 2024-11-07T14:18:22,960 DEBUG [StoreOpener-d9f2a49b2fc894490b8d6f930ab5fe35-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:22,961 INFO [StoreOpener-d9f2a49b2fc894490b8d6f930ab5fe35-1 {}] regionserver.HStore(327): Store=d9f2a49b2fc894490b8d6f930ab5fe35/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T14:18:22,961 INFO [StoreOpener-d9f2a49b2fc894490b8d6f930ab5fe35-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:22,961 INFO [StoreOpener-d9f2a49b2fc894490b8d6f930ab5fe35-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T14:18:22,962 INFO [StoreOpener-d9f2a49b2fc894490b8d6f930ab5fe35-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d9f2a49b2fc894490b8d6f930ab5fe35 columnFamilyName C 2024-11-07T14:18:22,962 DEBUG [StoreOpener-d9f2a49b2fc894490b8d6f930ab5fe35-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:22,962 INFO [StoreOpener-d9f2a49b2fc894490b8d6f930ab5fe35-1 {}] regionserver.HStore(327): Store=d9f2a49b2fc894490b8d6f930ab5fe35/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T14:18:22,962 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:22,963 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:22,963 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:22,964 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T14:18:22,965 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1085): writing seq id for d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:22,967 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T14:18:22,967 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1102): Opened d9f2a49b2fc894490b8d6f930ab5fe35; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62755162, jitterRate=-0.06487521529197693}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T14:18:22,968 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1001): Region open journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:22,969 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., pid=66, masterSystemTime=1730989102952 2024-11-07T14:18:22,970 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:22,970 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
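The StoreOpener entries above show each of the three families getting a CompactingMemStore with compactor=BASIC and a 2.00 MB in-memory flush threshold, which is the effect of the 'hbase.hregion.compacting.memstore.type' => 'BASIC' attribute on this table. The same policy can also be requested per column family; a minimal sketch, assuming the standard ColumnFamilyDescriptorBuilder API:

```java
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class BasicCompactingMemStoreSketch {
  // Family-level alternative to the table-level metadata attribute used by this
  // test; the store then opens with a CompactingMemStore using the BASIC
  // in-memory compactor, as logged by StoreOpener above.
  static ColumnFamilyDescriptor basicCompactingFamily(String name) {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
        .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
        .build();
  }
}
```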
2024-11-07T14:18:22,970 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=d9f2a49b2fc894490b8d6f930ab5fe35, regionState=OPEN, openSeqNum=2, regionLocation=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:22,972 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-11-07T14:18:22,972 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; OpenRegionProcedure d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 in 170 msec 2024-11-07T14:18:22,974 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=65, resume processing ppid=64 2024-11-07T14:18:22,974 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, ppid=64, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d9f2a49b2fc894490b8d6f930ab5fe35, ASSIGN in 324 msec 2024-11-07T14:18:22,974 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-07T14:18:22,974 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730989102974"}]},"ts":"1730989102974"} 2024-11-07T14:18:22,975 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-07T14:18:22,977 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-07T14:18:22,978 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1530 sec 2024-11-07T14:18:23,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-07T14:18:23,930 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 64 completed 2024-11-07T14:18:23,932 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x17b6adc5 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3a569490 2024-11-07T14:18:23,935 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c1ac389, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:18:23,937 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:18:23,938 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45048, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:18:23,939 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-07T14:18:23,940 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55306, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-07T14:18:23,942 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x669e1999 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6862e3ce 2024-11-07T14:18:23,945 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28e73c0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:18:23,946 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72aa9ee5 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@d296fed 2024-11-07T14:18:23,950 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c480dfb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:18:23,951 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4ec09297 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8d0caa5 2024-11-07T14:18:23,954 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34cb3991, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:18:23,955 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4dfb20f6 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@43f04e0e 2024-11-07T14:18:23,959 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e9ae050, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:18:23,960 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x17cf7fc0 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@560ec309 2024-11-07T14:18:23,963 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2fef31f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:18:23,964 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5886c0f2 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@eb04aeb 2024-11-07T14:18:23,968 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72537a47, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:18:23,969 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x66e575aa to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6a0e9c8f 2024-11-07T14:18:23,973 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36642cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:18:23,974 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x131ceb8f to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@d68f787 2024-11-07T14:18:23,977 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c299cfb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:18:23,978 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5a78bf6d to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@10e6bf6a 2024-11-07T14:18:23,981 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@605827c9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:18:23,981 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x328852db to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1730a60f 2024-11-07T14:18:23,985 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3677bd4f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:18:23,988 DEBUG [hconnection-0x201ae1e7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:18:23,989 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:18:23,989 DEBUG [hconnection-0x18f7b654-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:18:23,990 DEBUG [hconnection-0x6f644928-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:18:23,990 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45054, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:18:23,990 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:45058, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:18:23,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees 2024-11-07T14:18:23,991 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45060, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:18:23,992 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:18:23,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-07T14:18:23,993 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:18:23,993 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:18:23,996 DEBUG [hconnection-0x1c685024-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:18:23,996 DEBUG [hconnection-0x76ba8f91-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:18:23,997 DEBUG [hconnection-0x8359acd-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:18:23,997 DEBUG [hconnection-0x594a6a72-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:18:23,997 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45076, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:18:23,997 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45086, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:18:23,998 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45108, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:18:23,998 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45096, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:18:24,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:24,002 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d9f2a49b2fc894490b8d6f930ab5fe35 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-07T14:18:24,003 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=A 2024-11-07T14:18:24,003 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:24,003 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=B 2024-11-07T14:18:24,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:24,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=C 2024-11-07T14:18:24,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:24,020 DEBUG [hconnection-0x1965f323-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:18:24,021 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45118, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:18:24,022 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:24,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989164021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:24,023 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:24,023 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:24,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1730989164022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:24,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989164022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:24,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:24,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989164022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:24,032 DEBUG [hconnection-0x5bccbb44-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:18:24,033 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45122, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:18:24,036 DEBUG [hconnection-0x36b05fe9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:18:24,036 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:24,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45122 deadline: 1730989164035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:24,038 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45136, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:18:24,081 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/304f6021787045179c92ebc2db7e04de is 50, key is test_row_0/A:col10/1730989104000/Put/seqid=0 2024-11-07T14:18:24,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-07T14:18:24,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742043_1219 (size=12001) 2024-11-07T14:18:24,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:24,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989164124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:24,125 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:24,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989164125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:24,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:24,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1730989164125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:24,126 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:24,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989164125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:24,139 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:24,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45122 deadline: 1730989164138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:24,145 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:24,145 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-07T14:18:24,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:24,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:24,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:24,146 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:24,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:24,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:24,274 ERROR [LeaseRenewer:jenkins@localhost:34807 {}] server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins@localhost:34807,5,PEWorkerGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null at org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:24,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-07T14:18:24,300 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:24,300 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-07T14:18:24,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
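The writer threads above keep getting RegionTooBusyException ("Over memstore limit=512.0 K") while the memstore-pressure flush is still running, and the remote FlushRegionProcedure (pid=68) fails fast with "as already flushing" before being re-dispatched, as seen right below. On the client side these rejections are retriable; a minimal sketch of a writer loop that backs off and retries, assuming a plain Table handle (the test's own retry logic is not shown in this log):

```java
import java.io.IOException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriterSketch {
  // Retries a put when the region rejects it for exceeding the memstore limit,
  // backing off so the in-flight flush can drain the memstore. Depending on the
  // client retry settings ("maxRetries=0" in this run), the server-side
  // RegionTooBusyException surfaces here directly or wrapped in a retries
  // exception; either way it arrives as an IOException.
  static void putWithRetry(Table table, Put put) throws Exception {
    long backoffMs = 100;
    for (int attempt = 0; attempt < 10; attempt++) {
      try {
        table.put(put);
        return;
      } catch (IOException busy) {
        Thread.sleep(backoffMs);
        backoffMs = Math.min(backoffMs * 2, 5_000);
      }
    }
    throw new IOException("region stayed too busy after retries");
  }

  static Put exampleRow() {
    // Mirrors the cells visible in this log: row "test_row_0", family "A", qualifier "col10".
    return new Put(Bytes.toBytes("test_row_0"))
        .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
  }
}
```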
2024-11-07T14:18:24,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:24,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:24,301 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:24,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:24,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:24,328 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:24,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989164327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:24,328 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:24,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989164327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:24,328 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:24,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1730989164328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:24,328 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:24,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989164327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:24,343 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:24,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45122 deadline: 1730989164342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:24,452 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:24,453 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-07T14:18:24,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:24,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:24,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:24,453 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:24,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:24,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:24,499 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/304f6021787045179c92ebc2db7e04de 2024-11-07T14:18:24,531 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/7c74afb4edd84d4c90df411a08053569 is 50, key is test_row_0/B:col10/1730989104000/Put/seqid=0 2024-11-07T14:18:24,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742044_1220 (size=12001) 2024-11-07T14:18:24,537 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/7c74afb4edd84d4c90df411a08053569 2024-11-07T14:18:24,564 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/3c695a677c664af5aa963fd3a3704cea is 50, key is test_row_0/C:col10/1730989104000/Put/seqid=0 2024-11-07T14:18:24,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742045_1221 (size=12001) 2024-11-07T14:18:24,575 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), 
to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/3c695a677c664af5aa963fd3a3704cea 2024-11-07T14:18:24,581 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/304f6021787045179c92ebc2db7e04de as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/304f6021787045179c92ebc2db7e04de 2024-11-07T14:18:24,587 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/304f6021787045179c92ebc2db7e04de, entries=150, sequenceid=13, filesize=11.7 K 2024-11-07T14:18:24,588 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/7c74afb4edd84d4c90df411a08053569 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/7c74afb4edd84d4c90df411a08053569 2024-11-07T14:18:24,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-07T14:18:24,602 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/7c74afb4edd84d4c90df411a08053569, entries=150, sequenceid=13, filesize=11.7 K 2024-11-07T14:18:24,603 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/3c695a677c664af5aa963fd3a3704cea as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/3c695a677c664af5aa963fd3a3704cea 2024-11-07T14:18:24,606 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:24,607 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-07T14:18:24,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:24,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
as already flushing 2024-11-07T14:18:24,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:24,608 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:24,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:24,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:24,617 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/3c695a677c664af5aa963fd3a3704cea, entries=150, sequenceid=13, filesize=11.7 K 2024-11-07T14:18:24,618 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for d9f2a49b2fc894490b8d6f930ab5fe35 in 616ms, sequenceid=13, compaction requested=false 2024-11-07T14:18:24,618 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:24,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:24,631 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d9f2a49b2fc894490b8d6f930ab5fe35 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-07T14:18:24,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=A 2024-11-07T14:18:24,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:24,633 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=B 2024-11-07T14:18:24,633 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:24,633 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=C 
2024-11-07T14:18:24,633 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:24,639 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:24,639 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:24,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989164637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:24,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1730989164637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:24,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:24,640 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:24,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989164637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:24,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989164638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:24,641 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/be8fc892d67f45fba9abcf68af683d78 is 50, key is test_row_0/A:col10/1730989104019/Put/seqid=0 2024-11-07T14:18:24,647 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:24,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45122 deadline: 1730989164646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:24,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742046_1222 (size=14341) 2024-11-07T14:18:24,655 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/be8fc892d67f45fba9abcf68af683d78 2024-11-07T14:18:24,664 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/2f034c1fa4ff43a59ec29d768ef0471c is 50, key is test_row_0/B:col10/1730989104019/Put/seqid=0 2024-11-07T14:18:24,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742047_1223 (size=12001) 2024-11-07T14:18:24,741 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:24,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1730989164741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:24,742 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:24,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989164741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:24,742 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:24,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989164742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:24,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:24,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989164742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:24,761 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:24,761 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-07T14:18:24,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:24,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:24,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:24,762 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:24,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:24,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:24,914 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:24,915 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-07T14:18:24,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:24,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:24,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:24,915 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:24,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:24,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:24,944 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:24,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1730989164944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:24,945 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:24,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989164944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:24,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:24,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989164944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:24,946 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:24,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989164944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:25,068 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:25,068 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-07T14:18:25,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:25,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:25,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:25,069 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:25,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:25,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:25,071 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/2f034c1fa4ff43a59ec29d768ef0471c 2024-11-07T14:18:25,080 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/65febfbe71d4483694eae513c5cfb4db is 50, key is test_row_0/C:col10/1730989104019/Put/seqid=0 2024-11-07T14:18:25,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742048_1224 (size=12001) 2024-11-07T14:18:25,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-07T14:18:25,149 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:25,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45122 deadline: 1730989165149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:25,221 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:25,221 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-07T14:18:25,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:25,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:25,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:25,222 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:25,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:25,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:25,248 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:25,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989165247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:25,248 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:25,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989165247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:25,249 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:25,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1730989165247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:25,250 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:25,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989165248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:25,374 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:25,375 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-07T14:18:25,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:25,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:25,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:25,375 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:25,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:25,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:25,485 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/65febfbe71d4483694eae513c5cfb4db 2024-11-07T14:18:25,494 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/be8fc892d67f45fba9abcf68af683d78 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/be8fc892d67f45fba9abcf68af683d78 2024-11-07T14:18:25,498 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/be8fc892d67f45fba9abcf68af683d78, entries=200, sequenceid=41, filesize=14.0 K 2024-11-07T14:18:25,499 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/2f034c1fa4ff43a59ec29d768ef0471c as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/2f034c1fa4ff43a59ec29d768ef0471c 2024-11-07T14:18:25,504 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/2f034c1fa4ff43a59ec29d768ef0471c, entries=150, sequenceid=41, filesize=11.7 K 2024-11-07T14:18:25,504 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/65febfbe71d4483694eae513c5cfb4db as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/65febfbe71d4483694eae513c5cfb4db 2024-11-07T14:18:25,509 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/65febfbe71d4483694eae513c5cfb4db, entries=150, sequenceid=41, filesize=11.7 K 2024-11-07T14:18:25,510 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush 
of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for d9f2a49b2fc894490b8d6f930ab5fe35 in 879ms, sequenceid=41, compaction requested=false 2024-11-07T14:18:25,510 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:25,527 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:25,528 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-07T14:18:25,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:25,528 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2837): Flushing d9f2a49b2fc894490b8d6f930ab5fe35 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-07T14:18:25,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=A 2024-11-07T14:18:25,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:25,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=B 2024-11-07T14:18:25,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:25,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=C 2024-11-07T14:18:25,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:25,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/74a57279b9c24e288d4a81dba830a5f8 is 50, key is test_row_0/A:col10/1730989104635/Put/seqid=0 2024-11-07T14:18:25,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742049_1225 (size=12001) 2024-11-07T14:18:25,546 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/74a57279b9c24e288d4a81dba830a5f8 2024-11-07T14:18:25,555 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/13219b548b584a50bb1ea1bc78147855 is 50, key is test_row_0/B:col10/1730989104635/Put/seqid=0 2024-11-07T14:18:25,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742050_1226 (size=12001) 2024-11-07T14:18:25,561 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/13219b548b584a50bb1ea1bc78147855 2024-11-07T14:18:25,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/0b49ca003dd247ddb641449a7001fd7b is 50, key is test_row_0/C:col10/1730989104635/Put/seqid=0 2024-11-07T14:18:25,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742051_1227 (size=12001) 2024-11-07T14:18:25,583 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/0b49ca003dd247ddb641449a7001fd7b 2024-11-07T14:18:25,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/74a57279b9c24e288d4a81dba830a5f8 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/74a57279b9c24e288d4a81dba830a5f8 2024-11-07T14:18:25,598 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/74a57279b9c24e288d4a81dba830a5f8, entries=150, sequenceid=49, filesize=11.7 K 2024-11-07T14:18:25,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/13219b548b584a50bb1ea1bc78147855 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/13219b548b584a50bb1ea1bc78147855 2024-11-07T14:18:25,606 INFO 
[RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/13219b548b584a50bb1ea1bc78147855, entries=150, sequenceid=49, filesize=11.7 K 2024-11-07T14:18:25,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/0b49ca003dd247ddb641449a7001fd7b as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/0b49ca003dd247ddb641449a7001fd7b 2024-11-07T14:18:25,613 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/0b49ca003dd247ddb641449a7001fd7b, entries=150, sequenceid=49, filesize=11.7 K 2024-11-07T14:18:25,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-11-07T14:18:25,614 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for d9f2a49b2fc894490b8d6f930ab5fe35 in 86ms, sequenceid=49, compaction requested=true 2024-11-07T14:18:25,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2538): Flush status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:25,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:25,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=68 2024-11-07T14:18:25,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=68 2024-11-07T14:18:25,625 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-11-07T14:18:25,626 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6230 sec 2024-11-07T14:18:25,628 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees in 1.6370 sec 2024-11-07T14:18:25,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:25,757 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d9f2a49b2fc894490b8d6f930ab5fe35 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-07T14:18:25,758 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=A 2024-11-07T14:18:25,758 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:25,758 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=B 2024-11-07T14:18:25,758 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:25,758 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=C 2024-11-07T14:18:25,758 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:25,763 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/4a43549d252c42f5b3085c953b01daec is 50, key is test_row_0/A:col10/1730989105757/Put/seqid=0 2024-11-07T14:18:25,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742052_1228 (size=12001) 2024-11-07T14:18:25,771 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=62 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/4a43549d252c42f5b3085c953b01daec 2024-11-07T14:18:25,780 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/590005240ae5425d800c86abfb43e7ff is 50, key is test_row_0/B:col10/1730989105757/Put/seqid=0 2024-11-07T14:18:25,783 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:25,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989165779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:25,786 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:25,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989165782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:25,786 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:25,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989165782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:25,786 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:25,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1730989165783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:25,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742053_1229 (size=12001) 2024-11-07T14:18:25,886 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:25,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989165884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:25,890 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:25,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989165887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:25,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:25,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989165887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:25,890 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:25,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1730989165887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:26,090 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:26,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989166089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:26,093 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:26,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989166091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:26,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:26,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989166091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:26,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:26,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1730989166092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:26,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-07T14:18:26,098 INFO [Thread-1032 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-11-07T14:18:26,099 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:18:26,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees 2024-11-07T14:18:26,101 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:18:26,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-07T14:18:26,102 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:18:26,102 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:18:26,155 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:26,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45122 deadline: 1730989166153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:26,190 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=62 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/590005240ae5425d800c86abfb43e7ff 2024-11-07T14:18:26,203 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/337c34a98cff41cb82dadbbd1c127873 is 50, key is test_row_0/C:col10/1730989105757/Put/seqid=0 2024-11-07T14:18:26,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-07T14:18:26,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742054_1230 (size=12001) 2024-11-07T14:18:26,217 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=62 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/337c34a98cff41cb82dadbbd1c127873 2024-11-07T14:18:26,230 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/4a43549d252c42f5b3085c953b01daec as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/4a43549d252c42f5b3085c953b01daec 2024-11-07T14:18:26,236 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/4a43549d252c42f5b3085c953b01daec, entries=150, sequenceid=62, filesize=11.7 K 2024-11-07T14:18:26,242 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/590005240ae5425d800c86abfb43e7ff as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/590005240ae5425d800c86abfb43e7ff 2024-11-07T14:18:26,247 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/590005240ae5425d800c86abfb43e7ff, entries=150, sequenceid=62, filesize=11.7 K 2024-11-07T14:18:26,248 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/337c34a98cff41cb82dadbbd1c127873 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/337c34a98cff41cb82dadbbd1c127873 2024-11-07T14:18:26,253 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/337c34a98cff41cb82dadbbd1c127873, entries=150, sequenceid=62, filesize=11.7 K 2024-11-07T14:18:26,254 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:26,254 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for d9f2a49b2fc894490b8d6f930ab5fe35 in 497ms, sequenceid=62, compaction requested=true 2024-11-07T14:18:26,254 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:26,254 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:18:26,254 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-07T14:18:26,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:26,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d9f2a49b2fc894490b8d6f930ab5fe35:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:18:26,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:26,255 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2837): Flushing d9f2a49b2fc894490b8d6f930ab5fe35 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-07T14:18:26,255 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:18:26,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=A 2024-11-07T14:18:26,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d9f2a49b2fc894490b8d6f930ab5fe35:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:18:26,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:26,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:26,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=B 2024-11-07T14:18:26,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:26,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=C 2024-11-07T14:18:26,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:26,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d9f2a49b2fc894490b8d6f930ab5fe35:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:18:26,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:18:26,258 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50344 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:18:26,258 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): d9f2a49b2fc894490b8d6f930ab5fe35/A is initiating minor compaction (all files) 2024-11-07T14:18:26,258 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] 
regionserver.HRegion(2351): Starting compaction of d9f2a49b2fc894490b8d6f930ab5fe35/A in TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:26,258 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/304f6021787045179c92ebc2db7e04de, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/be8fc892d67f45fba9abcf68af683d78, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/74a57279b9c24e288d4a81dba830a5f8, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/4a43549d252c42f5b3085c953b01daec] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp, totalSize=49.2 K 2024-11-07T14:18:26,258 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:18:26,258 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): d9f2a49b2fc894490b8d6f930ab5fe35/B is initiating minor compaction (all files) 2024-11-07T14:18:26,258 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d9f2a49b2fc894490b8d6f930ab5fe35/B in TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:26,258 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/7c74afb4edd84d4c90df411a08053569, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/2f034c1fa4ff43a59ec29d768ef0471c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/13219b548b584a50bb1ea1bc78147855, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/590005240ae5425d800c86abfb43e7ff] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp, totalSize=46.9 K 2024-11-07T14:18:26,260 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 7c74afb4edd84d4c90df411a08053569, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1730989104000 2024-11-07T14:18:26,260 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 304f6021787045179c92ebc2db7e04de, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1730989104000 2024-11-07T14:18:26,266 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 2f034c1fa4ff43a59ec29d768ef0471c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1730989104019 2024-11-07T14:18:26,267 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 13219b548b584a50bb1ea1bc78147855, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1730989104635 2024-11-07T14:18:26,268 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 590005240ae5425d800c86abfb43e7ff, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=62, earliestPutTs=1730989105754 2024-11-07T14:18:26,269 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting be8fc892d67f45fba9abcf68af683d78, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1730989104019 2024-11-07T14:18:26,270 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 74a57279b9c24e288d4a81dba830a5f8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1730989104635 2024-11-07T14:18:26,271 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a43549d252c42f5b3085c953b01daec, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=62, earliestPutTs=1730989105754 2024-11-07T14:18:26,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/51b4a1a37a4c40dba8619465cad233d8 is 50, key is 
test_row_0/A:col10/1730989105776/Put/seqid=0 2024-11-07T14:18:26,290 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d9f2a49b2fc894490b8d6f930ab5fe35#B#compaction#186 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:18:26,291 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/d6e5a0bf22234bdf9f3dddefe3c0688e is 50, key is test_row_0/B:col10/1730989105757/Put/seqid=0 2024-11-07T14:18:26,292 DEBUG [regionserver/69430dbfd73f:0.Chore.1 {}] throttle.PressureAwareCompactionThroughputController(103): CompactionPressure is 0.07692307692307693, tune throughput to 53.85 MB/second 2024-11-07T14:18:26,293 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d9f2a49b2fc894490b8d6f930ab5fe35#A#compaction#187 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:18:26,294 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/b8742b2b7f2a4914b7296bd7dd0d302f is 50, key is test_row_0/A:col10/1730989105757/Put/seqid=0 2024-11-07T14:18:26,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742055_1231 (size=12001) 2024-11-07T14:18:26,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742057_1233 (size=12139) 2024-11-07T14:18:26,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742056_1232 (size=12139) 2024-11-07T14:18:26,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:26,399 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:26,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-07T14:18:26,408 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:26,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989166404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:26,411 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:26,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1730989166406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:26,411 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:26,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989166406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:26,411 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:26,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989166408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:26,515 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:26,515 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:26,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989166512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:26,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1730989166512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:26,515 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:26,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989166513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:26,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:26,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989166515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:26,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-07T14:18:26,718 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:26,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1730989166716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:26,720 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:26,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989166717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:26,720 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:26,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989166717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:26,724 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:26,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989166721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:26,727 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=86 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/51b4a1a37a4c40dba8619465cad233d8 2024-11-07T14:18:26,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/6fe97d0635fc40fe888156aaebaab153 is 50, key is test_row_0/B:col10/1730989105776/Put/seqid=0 2024-11-07T14:18:26,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742058_1234 (size=12001) 2024-11-07T14:18:26,749 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/b8742b2b7f2a4914b7296bd7dd0d302f as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/b8742b2b7f2a4914b7296bd7dd0d302f 2024-11-07T14:18:26,751 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/d6e5a0bf22234bdf9f3dddefe3c0688e as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/d6e5a0bf22234bdf9f3dddefe3c0688e 2024-11-07T14:18:26,757 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d9f2a49b2fc894490b8d6f930ab5fe35/A of d9f2a49b2fc894490b8d6f930ab5fe35 into b8742b2b7f2a4914b7296bd7dd0d302f(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:18:26,757 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:26,757 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., storeName=d9f2a49b2fc894490b8d6f930ab5fe35/A, priority=12, startTime=1730989106254; duration=0sec 2024-11-07T14:18:26,757 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:18:26,757 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9f2a49b2fc894490b8d6f930ab5fe35:A 2024-11-07T14:18:26,757 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d9f2a49b2fc894490b8d6f930ab5fe35/B of d9f2a49b2fc894490b8d6f930ab5fe35 into d6e5a0bf22234bdf9f3dddefe3c0688e(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:18:26,757 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:18:26,757 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:26,757 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., storeName=d9f2a49b2fc894490b8d6f930ab5fe35/B, priority=12, startTime=1730989106255; duration=0sec 2024-11-07T14:18:26,757 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:26,757 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9f2a49b2fc894490b8d6f930ab5fe35:B 2024-11-07T14:18:26,759 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:18:26,759 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): d9f2a49b2fc894490b8d6f930ab5fe35/C is initiating minor compaction (all files) 2024-11-07T14:18:26,759 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d9f2a49b2fc894490b8d6f930ab5fe35/C in TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:26,759 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/3c695a677c664af5aa963fd3a3704cea, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/65febfbe71d4483694eae513c5cfb4db, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/0b49ca003dd247ddb641449a7001fd7b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/337c34a98cff41cb82dadbbd1c127873] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp, totalSize=46.9 K 2024-11-07T14:18:26,759 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c695a677c664af5aa963fd3a3704cea, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1730989104000 2024-11-07T14:18:26,760 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 65febfbe71d4483694eae513c5cfb4db, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1730989104019 2024-11-07T14:18:26,761 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0b49ca003dd247ddb641449a7001fd7b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1730989104635 2024-11-07T14:18:26,761 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 337c34a98cff41cb82dadbbd1c127873, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=62, earliestPutTs=1730989105754 2024-11-07T14:18:26,773 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d9f2a49b2fc894490b8d6f930ab5fe35#C#compaction#189 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:18:26,773 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/106b09c3c2d64841a84f1aeb3d26faf6 is 50, key is test_row_0/C:col10/1730989105757/Put/seqid=0 2024-11-07T14:18:26,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742059_1235 (size=12139) 2024-11-07T14:18:26,798 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/106b09c3c2d64841a84f1aeb3d26faf6 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/106b09c3c2d64841a84f1aeb3d26faf6 2024-11-07T14:18:26,805 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d9f2a49b2fc894490b8d6f930ab5fe35/C of d9f2a49b2fc894490b8d6f930ab5fe35 into 106b09c3c2d64841a84f1aeb3d26faf6(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:18:26,806 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:26,806 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., storeName=d9f2a49b2fc894490b8d6f930ab5fe35/C, priority=12, startTime=1730989106256; duration=0sec 2024-11-07T14:18:26,806 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:26,806 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9f2a49b2fc894490b8d6f930ab5fe35:C 2024-11-07T14:18:26,985 ERROR [LeaseRenewer:jenkins.hfs.0@localhost:34807 {}] server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins.hfs.0@localhost:34807,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null at org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:27,021 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:27,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1730989167019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:27,024 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:27,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989167023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:27,024 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:27,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989167023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:27,026 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:27,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989167025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:27,029 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-07T14:18:27,144 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=86 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/6fe97d0635fc40fe888156aaebaab153 2024-11-07T14:18:27,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/54b936af5e384840a3ebfe90b6019626 is 50, key is test_row_0/C:col10/1730989105776/Put/seqid=0 2024-11-07T14:18:27,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742060_1236 (size=12001) 2024-11-07T14:18:27,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-07T14:18:27,530 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:27,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1730989167528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:27,530 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:27,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989167528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:27,530 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:27,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989167529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:27,531 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:27,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989167529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:27,562 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=86 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/54b936af5e384840a3ebfe90b6019626 2024-11-07T14:18:27,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/51b4a1a37a4c40dba8619465cad233d8 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/51b4a1a37a4c40dba8619465cad233d8 2024-11-07T14:18:27,576 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/51b4a1a37a4c40dba8619465cad233d8, entries=150, sequenceid=86, filesize=11.7 K 2024-11-07T14:18:27,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/6fe97d0635fc40fe888156aaebaab153 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/6fe97d0635fc40fe888156aaebaab153 2024-11-07T14:18:27,582 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/6fe97d0635fc40fe888156aaebaab153, entries=150, sequenceid=86, filesize=11.7 K 2024-11-07T14:18:27,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/54b936af5e384840a3ebfe90b6019626 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/54b936af5e384840a3ebfe90b6019626 2024-11-07T14:18:27,588 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/54b936af5e384840a3ebfe90b6019626, entries=150, sequenceid=86, filesize=11.7 K 2024-11-07T14:18:27,589 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for d9f2a49b2fc894490b8d6f930ab5fe35 in 1335ms, sequenceid=86, compaction requested=false 2024-11-07T14:18:27,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:27,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:27,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-11-07T14:18:27,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=70 2024-11-07T14:18:27,592 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-11-07T14:18:27,592 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4890 sec 2024-11-07T14:18:27,598 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees in 1.4940 sec 2024-11-07T14:18:27,742 DEBUG [master/69430dbfd73f:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 88991fc0836214a8e1586689b73580c1 changed from -1.0 to 0.0, refreshing cache 2024-11-07T14:18:28,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:28,176 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d9f2a49b2fc894490b8d6f930ab5fe35 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-07T14:18:28,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=A 2024-11-07T14:18:28,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:28,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=B 2024-11-07T14:18:28,177 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:28,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=C 2024-11-07T14:18:28,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:28,183 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/d2b88d00580b48df8c0373ccf2766ddc is 50, key is test_row_0/A:col10/1730989108175/Put/seqid=0 2024-11-07T14:18:28,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742061_1237 (size=12001) 2024-11-07T14:18:28,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-07T14:18:28,208 INFO [Thread-1032 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 69 completed 2024-11-07T14:18:28,210 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:18:28,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-11-07T14:18:28,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-07T14:18:28,212 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:18:28,213 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:18:28,214 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:18:28,261 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:28,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45122 deadline: 1730989168260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:28,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-07T14:18:28,364 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:28,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45122 deadline: 1730989168363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:28,365 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:28,366 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-07T14:18:28,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:28,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:28,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:28,366 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:28,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:28,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:28,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-07T14:18:28,518 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:28,519 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-07T14:18:28,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:28,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:28,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:28,519 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:28,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:28,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:28,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:28,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989168532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:28,535 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:28,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989168533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:28,535 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:28,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989168534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:28,536 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:28,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1730989168534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:28,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:28,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45122 deadline: 1730989168565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:28,598 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=102 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/d2b88d00580b48df8c0373ccf2766ddc 2024-11-07T14:18:28,607 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/8696a14836da426cbd7f5fb22034882d is 50, key is test_row_0/B:col10/1730989108175/Put/seqid=0 2024-11-07T14:18:28,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742062_1238 (size=12001) 2024-11-07T14:18:28,623 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=102 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/8696a14836da426cbd7f5fb22034882d 2024-11-07T14:18:28,632 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/bce9128814404198b8116ac7049455eb is 50, key is test_row_0/C:col10/1730989108175/Put/seqid=0 2024-11-07T14:18:28,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742063_1239 (size=12001) 2024-11-07T14:18:28,638 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=102 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/bce9128814404198b8116ac7049455eb 2024-11-07T14:18:28,643 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/d2b88d00580b48df8c0373ccf2766ddc as 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/d2b88d00580b48df8c0373ccf2766ddc 2024-11-07T14:18:28,650 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/d2b88d00580b48df8c0373ccf2766ddc, entries=150, sequenceid=102, filesize=11.7 K 2024-11-07T14:18:28,652 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/8696a14836da426cbd7f5fb22034882d as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/8696a14836da426cbd7f5fb22034882d 2024-11-07T14:18:28,659 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/8696a14836da426cbd7f5fb22034882d, entries=150, sequenceid=102, filesize=11.7 K 2024-11-07T14:18:28,660 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/bce9128814404198b8116ac7049455eb as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/bce9128814404198b8116ac7049455eb 2024-11-07T14:18:28,672 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:28,672 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/bce9128814404198b8116ac7049455eb, entries=150, sequenceid=102, filesize=11.7 K 2024-11-07T14:18:28,673 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-07T14:18:28,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:28,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:28,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:28,673 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72
java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-07T14:18:28,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72
java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-07T14:18:28,674 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for d9f2a49b2fc894490b8d6f930ab5fe35 in 498ms, sequenceid=102, compaction requested=true
2024-11-07T14:18:28,674 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d9f2a49b2fc894490b8d6f930ab5fe35:
2024-11-07T14:18:28,674 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-07T14:18:28,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=72
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-07T14:18:28,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d9f2a49b2fc894490b8d6f930ab5fe35:A, priority=-2147483648, current under compaction store size is 1
2024-11-07T14:18:28,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-07T14:18:28,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d9f2a49b2fc894490b8d6f930ab5fe35:B, priority=-2147483648, current under compaction store size is 2
2024-11-07T14:18:28,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-07T14:18:28,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d9f2a49b2fc894490b8d6f930ab5fe35:C, priority=-2147483648, current under compaction store size is 3
2024-11-07T14:18:28,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-07T14:18:28,676 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-07T14:18:28,677 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-07T14:18:28,677 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): d9f2a49b2fc894490b8d6f930ab5fe35/A is initiating minor compaction (all files)
2024-11-07T14:18:28,677 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d9f2a49b2fc894490b8d6f930ab5fe35/A in TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.
2024-11-07T14:18:28,677 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/b8742b2b7f2a4914b7296bd7dd0d302f, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/51b4a1a37a4c40dba8619465cad233d8, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/d2b88d00580b48df8c0373ccf2766ddc] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp, totalSize=35.3 K 2024-11-07T14:18:28,679 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting b8742b2b7f2a4914b7296bd7dd0d302f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=62, earliestPutTs=1730989105754 2024-11-07T14:18:28,680 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:18:28,680 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): d9f2a49b2fc894490b8d6f930ab5fe35/B is initiating minor compaction (all files) 2024-11-07T14:18:28,680 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d9f2a49b2fc894490b8d6f930ab5fe35/B in TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:28,680 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 51b4a1a37a4c40dba8619465cad233d8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1730989105776 2024-11-07T14:18:28,680 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/d6e5a0bf22234bdf9f3dddefe3c0688e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/6fe97d0635fc40fe888156aaebaab153, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/8696a14836da426cbd7f5fb22034882d] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp, totalSize=35.3 K 2024-11-07T14:18:28,680 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting d2b88d00580b48df8c0373ccf2766ddc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1730989106403 2024-11-07T14:18:28,681 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting d6e5a0bf22234bdf9f3dddefe3c0688e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=62, earliestPutTs=1730989105754 2024-11-07T14:18:28,682 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 6fe97d0635fc40fe888156aaebaab153, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1730989105776 2024-11-07T14:18:28,684 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 8696a14836da426cbd7f5fb22034882d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1730989106403 2024-11-07T14:18:28,696 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d9f2a49b2fc894490b8d6f930ab5fe35#A#compaction#194 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:18:28,697 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/a48d0b1eadd0442a97aaaabc65d44501 is 50, key is test_row_0/A:col10/1730989108175/Put/seqid=0 2024-11-07T14:18:28,701 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d9f2a49b2fc894490b8d6f930ab5fe35#B#compaction#195 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:18:28,702 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/aebf96503800437fbc3041d4246374cf is 50, key is test_row_0/B:col10/1730989108175/Put/seqid=0 2024-11-07T14:18:28,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742064_1240 (size=12241) 2024-11-07T14:18:28,713 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/a48d0b1eadd0442a97aaaabc65d44501 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/a48d0b1eadd0442a97aaaabc65d44501 2024-11-07T14:18:28,719 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d9f2a49b2fc894490b8d6f930ab5fe35/A of d9f2a49b2fc894490b8d6f930ab5fe35 into a48d0b1eadd0442a97aaaabc65d44501(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:18:28,719 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:28,719 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., storeName=d9f2a49b2fc894490b8d6f930ab5fe35/A, priority=13, startTime=1730989108674; duration=0sec 2024-11-07T14:18:28,721 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:18:28,721 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9f2a49b2fc894490b8d6f930ab5fe35:A 2024-11-07T14:18:28,721 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:18:28,722 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:18:28,722 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): d9f2a49b2fc894490b8d6f930ab5fe35/C is initiating minor compaction (all files) 2024-11-07T14:18:28,722 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d9f2a49b2fc894490b8d6f930ab5fe35/C in TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:28,722 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/106b09c3c2d64841a84f1aeb3d26faf6, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/54b936af5e384840a3ebfe90b6019626, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/bce9128814404198b8116ac7049455eb] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp, totalSize=35.3 K 2024-11-07T14:18:28,722 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 106b09c3c2d64841a84f1aeb3d26faf6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=62, earliestPutTs=1730989105754 2024-11-07T14:18:28,723 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 54b936af5e384840a3ebfe90b6019626, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1730989105776 2024-11-07T14:18:28,723 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting bce9128814404198b8116ac7049455eb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1730989106403 2024-11-07T14:18:28,750 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d9f2a49b2fc894490b8d6f930ab5fe35#C#compaction#196 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:18:28,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742065_1241 (size=12241) 2024-11-07T14:18:28,751 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/2aea7f5bbe43472fbafd5427e48742dc is 50, key is test_row_0/C:col10/1730989108175/Put/seqid=0 2024-11-07T14:18:28,758 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/aebf96503800437fbc3041d4246374cf as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/aebf96503800437fbc3041d4246374cf 2024-11-07T14:18:28,763 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d9f2a49b2fc894490b8d6f930ab5fe35/B of d9f2a49b2fc894490b8d6f930ab5fe35 into aebf96503800437fbc3041d4246374cf(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:18:28,763 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d9f2a49b2fc894490b8d6f930ab5fe35:
2024-11-07T14:18:28,763 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., storeName=d9f2a49b2fc894490b8d6f930ab5fe35/B, priority=13, startTime=1730989108676; duration=0sec
2024-11-07T14:18:28,763 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-07T14:18:28,763 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9f2a49b2fc894490b8d6f930ab5fe35:B
2024-11-07T14:18:28,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742066_1242 (size=12241)
2024-11-07T14:18:28,778 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/2aea7f5bbe43472fbafd5427e48742dc as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/2aea7f5bbe43472fbafd5427e48742dc
2024-11-07T14:18:28,784 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d9f2a49b2fc894490b8d6f930ab5fe35/C of d9f2a49b2fc894490b8d6f930ab5fe35 into 2aea7f5bbe43472fbafd5427e48742dc(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-07T14:18:28,784 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d9f2a49b2fc894490b8d6f930ab5fe35:
2024-11-07T14:18:28,784 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., storeName=d9f2a49b2fc894490b8d6f930ab5fe35/C, priority=13, startTime=1730989108676; duration=0sec
2024-11-07T14:18:28,784 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-07T14:18:28,784 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9f2a49b2fc894490b8d6f930ab5fe35:C
2024-11-07T14:18:28,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71
2024-11-07T14:18:28,826 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081
2024-11-07T14:18:28,826 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72
2024-11-07T14:18:28,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.
2024-11-07T14:18:28,826 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing d9f2a49b2fc894490b8d6f930ab5fe35 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB
2024-11-07T14:18:28,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=A
2024-11-07T14:18:28,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-07T14:18:28,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=B
2024-11-07T14:18:28,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-07T14:18:28,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=C
2024-11-07T14:18:28,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-07T14:18:28,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/c01cb5d57f0b410ea8fa3120adcf2d68 is 50, key is test_row_0/A:col10/1730989108259/Put/seqid=0 2024-11-07T14:18:28,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742067_1243 (size=12001) 2024-11-07T14:18:28,842 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/c01cb5d57f0b410ea8fa3120adcf2d68 2024-11-07T14:18:28,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/858266c9a3dd4e0ab705b10c7da7c604 is 50, key is test_row_0/B:col10/1730989108259/Put/seqid=0 2024-11-07T14:18:28,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:28,872 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:28,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742068_1244 (size=12001) 2024-11-07T14:18:28,894 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/858266c9a3dd4e0ab705b10c7da7c604 2024-11-07T14:18:28,900 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:28,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45122 deadline: 1730989168899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:28,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/320c5e2a782140fe88b537b7d7ee1508 is 50, key is test_row_0/C:col10/1730989108259/Put/seqid=0 2024-11-07T14:18:28,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742069_1245 (size=12001) 2024-11-07T14:18:28,939 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/320c5e2a782140fe88b537b7d7ee1508 2024-11-07T14:18:28,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/c01cb5d57f0b410ea8fa3120adcf2d68 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/c01cb5d57f0b410ea8fa3120adcf2d68 2024-11-07T14:18:28,949 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/c01cb5d57f0b410ea8fa3120adcf2d68, entries=150, sequenceid=128, filesize=11.7 K 2024-11-07T14:18:28,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/858266c9a3dd4e0ab705b10c7da7c604 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/858266c9a3dd4e0ab705b10c7da7c604 2024-11-07T14:18:28,956 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 
{event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/858266c9a3dd4e0ab705b10c7da7c604, entries=150, sequenceid=128, filesize=11.7 K 2024-11-07T14:18:28,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/320c5e2a782140fe88b537b7d7ee1508 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/320c5e2a782140fe88b537b7d7ee1508 2024-11-07T14:18:28,964 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/320c5e2a782140fe88b537b7d7ee1508, entries=150, sequenceid=128, filesize=11.7 K 2024-11-07T14:18:28,965 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for d9f2a49b2fc894490b8d6f930ab5fe35 in 139ms, sequenceid=128, compaction requested=false 2024-11-07T14:18:28,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:28,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:28,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-11-07T14:18:28,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-11-07T14:18:28,967 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-11-07T14:18:28,967 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 752 msec 2024-11-07T14:18:28,969 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 758 msec 2024-11-07T14:18:29,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:29,004 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d9f2a49b2fc894490b8d6f930ab5fe35 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-07T14:18:29,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=A 2024-11-07T14:18:29,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:29,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=B 2024-11-07T14:18:29,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:29,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=C 2024-11-07T14:18:29,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:29,010 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/1edfd2ff2e374865aa39851f263e53de is 50, key is test_row_0/A:col10/1730989108893/Put/seqid=0 2024-11-07T14:18:29,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742070_1246 (size=12151) 2024-11-07T14:18:29,025 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=142 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/1edfd2ff2e374865aa39851f263e53de 2024-11-07T14:18:29,036 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/8cd733df857043a5a2154c2b182687da is 50, key is test_row_0/B:col10/1730989108893/Put/seqid=0 2024-11-07T14:18:29,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742071_1247 
(size=12151) 2024-11-07T14:18:29,046 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=142 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/8cd733df857043a5a2154c2b182687da 2024-11-07T14:18:29,055 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/17b2c2834dde4de6a04798c09a859e8d is 50, key is test_row_0/C:col10/1730989108893/Put/seqid=0 2024-11-07T14:18:29,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742072_1248 (size=12151) 2024-11-07T14:18:29,062 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=142 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/17b2c2834dde4de6a04798c09a859e8d 2024-11-07T14:18:29,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/1edfd2ff2e374865aa39851f263e53de as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/1edfd2ff2e374865aa39851f263e53de 2024-11-07T14:18:29,076 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:29,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45122 deadline: 1730989169073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:29,075 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/1edfd2ff2e374865aa39851f263e53de, entries=150, sequenceid=142, filesize=11.9 K 2024-11-07T14:18:29,077 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/8cd733df857043a5a2154c2b182687da as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/8cd733df857043a5a2154c2b182687da 2024-11-07T14:18:29,083 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/8cd733df857043a5a2154c2b182687da, entries=150, sequenceid=142, filesize=11.9 K 2024-11-07T14:18:29,084 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/17b2c2834dde4de6a04798c09a859e8d as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/17b2c2834dde4de6a04798c09a859e8d 2024-11-07T14:18:29,088 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/17b2c2834dde4de6a04798c09a859e8d, entries=150, sequenceid=142, filesize=11.9 K 2024-11-07T14:18:29,090 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for d9f2a49b2fc894490b8d6f930ab5fe35 in 87ms, sequenceid=142, compaction requested=true 2024-11-07T14:18:29,090 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:29,090 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:18:29,092 DEBUG 
[RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:18:29,092 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): d9f2a49b2fc894490b8d6f930ab5fe35/A is initiating minor compaction (all files) 2024-11-07T14:18:29,092 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d9f2a49b2fc894490b8d6f930ab5fe35/A in TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:29,092 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d9f2a49b2fc894490b8d6f930ab5fe35:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:18:29,092 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/a48d0b1eadd0442a97aaaabc65d44501, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/c01cb5d57f0b410ea8fa3120adcf2d68, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/1edfd2ff2e374865aa39851f263e53de] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp, totalSize=35.5 K 2024-11-07T14:18:29,092 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:29,092 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:18:29,092 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d9f2a49b2fc894490b8d6f930ab5fe35:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:18:29,092 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:29,092 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d9f2a49b2fc894490b8d6f930ab5fe35:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:18:29,092 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:18:29,092 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting a48d0b1eadd0442a97aaaabc65d44501, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1730989106403 2024-11-07T14:18:29,093 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting c01cb5d57f0b410ea8fa3120adcf2d68, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1730989108252 2024-11-07T14:18:29,093 
DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1edfd2ff2e374865aa39851f263e53de, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=142, earliestPutTs=1730989108893 2024-11-07T14:18:29,094 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:18:29,094 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): d9f2a49b2fc894490b8d6f930ab5fe35/B is initiating minor compaction (all files) 2024-11-07T14:18:29,094 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d9f2a49b2fc894490b8d6f930ab5fe35/B in TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:29,094 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/aebf96503800437fbc3041d4246374cf, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/858266c9a3dd4e0ab705b10c7da7c604, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/8cd733df857043a5a2154c2b182687da] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp, totalSize=35.5 K 2024-11-07T14:18:29,095 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting aebf96503800437fbc3041d4246374cf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1730989106403 2024-11-07T14:18:29,095 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 858266c9a3dd4e0ab705b10c7da7c604, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1730989108252 2024-11-07T14:18:29,097 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 8cd733df857043a5a2154c2b182687da, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=142, earliestPutTs=1730989108893 2024-11-07T14:18:29,106 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d9f2a49b2fc894490b8d6f930ab5fe35#A#compaction#203 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:18:29,106 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/a56191bc5ff9445faf5d158a278d4f87 is 50, key is test_row_0/A:col10/1730989108893/Put/seqid=0 2024-11-07T14:18:29,120 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d9f2a49b2fc894490b8d6f930ab5fe35#B#compaction#204 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:18:29,121 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/259bb8dcd8e14468a5a70e3fda02839a is 50, key is test_row_0/B:col10/1730989108893/Put/seqid=0 2024-11-07T14:18:29,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742073_1249 (size=12493) 2024-11-07T14:18:29,150 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/a56191bc5ff9445faf5d158a278d4f87 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/a56191bc5ff9445faf5d158a278d4f87 2024-11-07T14:18:29,156 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d9f2a49b2fc894490b8d6f930ab5fe35/A of d9f2a49b2fc894490b8d6f930ab5fe35 into a56191bc5ff9445faf5d158a278d4f87(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:18:29,156 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:29,156 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., storeName=d9f2a49b2fc894490b8d6f930ab5fe35/A, priority=13, startTime=1730989109090; duration=0sec 2024-11-07T14:18:29,157 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:18:29,157 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9f2a49b2fc894490b8d6f930ab5fe35:A 2024-11-07T14:18:29,157 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:18:29,158 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:18:29,158 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): d9f2a49b2fc894490b8d6f930ab5fe35/C is initiating minor compaction (all files) 2024-11-07T14:18:29,158 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d9f2a49b2fc894490b8d6f930ab5fe35/C in TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:29,158 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/2aea7f5bbe43472fbafd5427e48742dc, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/320c5e2a782140fe88b537b7d7ee1508, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/17b2c2834dde4de6a04798c09a859e8d] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp, totalSize=35.5 K 2024-11-07T14:18:29,159 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2aea7f5bbe43472fbafd5427e48742dc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1730989106403 2024-11-07T14:18:29,160 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 320c5e2a782140fe88b537b7d7ee1508, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1730989108252 2024-11-07T14:18:29,160 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 17b2c2834dde4de6a04798c09a859e8d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=142, earliestPutTs=1730989108893 2024-11-07T14:18:29,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742074_1250 (size=12493) 2024-11-07T14:18:29,174 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d9f2a49b2fc894490b8d6f930ab5fe35#C#compaction#205 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:18:29,175 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/5841af2d948d461ea71c77485f626650 is 50, key is test_row_0/C:col10/1730989108893/Put/seqid=0 2024-11-07T14:18:29,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:29,179 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d9f2a49b2fc894490b8d6f930ab5fe35 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-07T14:18:29,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=A 2024-11-07T14:18:29,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:29,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=B 2024-11-07T14:18:29,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:29,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=C 2024-11-07T14:18:29,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:29,188 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/259bb8dcd8e14468a5a70e3fda02839a as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/259bb8dcd8e14468a5a70e3fda02839a 2024-11-07T14:18:29,191 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/6af6507f90b04b8cbe0cfc06d3c2183e is 50, key is test_row_0/A:col10/1730989109053/Put/seqid=0 2024-11-07T14:18:29,197 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d9f2a49b2fc894490b8d6f930ab5fe35/B of d9f2a49b2fc894490b8d6f930ab5fe35 into 259bb8dcd8e14468a5a70e3fda02839a(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:18:29,197 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:29,197 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., storeName=d9f2a49b2fc894490b8d6f930ab5fe35/B, priority=13, startTime=1730989109092; duration=0sec 2024-11-07T14:18:29,198 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:29,199 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9f2a49b2fc894490b8d6f930ab5fe35:B 2024-11-07T14:18:29,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742075_1251 (size=12493) 2024-11-07T14:18:29,211 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/5841af2d948d461ea71c77485f626650 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/5841af2d948d461ea71c77485f626650 2024-11-07T14:18:29,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742076_1252 (size=14541) 2024-11-07T14:18:29,221 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d9f2a49b2fc894490b8d6f930ab5fe35/C of d9f2a49b2fc894490b8d6f930ab5fe35 into 5841af2d948d461ea71c77485f626650(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:18:29,221 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:29,221 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., storeName=d9f2a49b2fc894490b8d6f930ab5fe35/C, priority=13, startTime=1730989109092; duration=0sec 2024-11-07T14:18:29,221 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:29,221 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9f2a49b2fc894490b8d6f930ab5fe35:C 2024-11-07T14:18:29,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:29,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45122 deadline: 1730989169219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:29,223 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/6af6507f90b04b8cbe0cfc06d3c2183e 2024-11-07T14:18:29,232 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/07887b5f20cd4e9e9dfb0cc0893f625c is 50, key is test_row_0/B:col10/1730989109053/Put/seqid=0 2024-11-07T14:18:29,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742077_1253 (size=12151) 2024-11-07T14:18:29,252 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/07887b5f20cd4e9e9dfb0cc0893f625c 2024-11-07T14:18:29,262 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/ef4ba4bb0adb413483e8f60580597b5c is 50, key is test_row_0/C:col10/1730989109053/Put/seqid=0 2024-11-07T14:18:29,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-07T14:18:29,316 INFO [Thread-1032 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-11-07T14:18:29,317 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:18:29,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] 
procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-11-07T14:18:29,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-07T14:18:29,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742078_1254 (size=12151) 2024-11-07T14:18:29,319 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:18:29,320 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:18:29,320 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:18:29,320 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/ef4ba4bb0adb413483e8f60580597b5c 2024-11-07T14:18:29,324 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:29,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45122 deadline: 1730989169323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:29,328 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/6af6507f90b04b8cbe0cfc06d3c2183e as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/6af6507f90b04b8cbe0cfc06d3c2183e 2024-11-07T14:18:29,334 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/6af6507f90b04b8cbe0cfc06d3c2183e, entries=200, sequenceid=166, filesize=14.2 K 2024-11-07T14:18:29,336 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/07887b5f20cd4e9e9dfb0cc0893f625c as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/07887b5f20cd4e9e9dfb0cc0893f625c 2024-11-07T14:18:29,341 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/07887b5f20cd4e9e9dfb0cc0893f625c, entries=150, sequenceid=166, filesize=11.9 K 2024-11-07T14:18:29,342 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/ef4ba4bb0adb413483e8f60580597b5c as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/ef4ba4bb0adb413483e8f60580597b5c 2024-11-07T14:18:29,347 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/ef4ba4bb0adb413483e8f60580597b5c, entries=150, sequenceid=166, filesize=11.9 K 2024-11-07T14:18:29,348 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 
KB/68700 for d9f2a49b2fc894490b8d6f930ab5fe35 in 169ms, sequenceid=166, compaction requested=false 2024-11-07T14:18:29,348 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:29,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-07T14:18:29,472 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:29,473 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-07T14:18:29,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:29,473 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing d9f2a49b2fc894490b8d6f930ab5fe35 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-07T14:18:29,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=A 2024-11-07T14:18:29,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:29,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=B 2024-11-07T14:18:29,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:29,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=C 2024-11-07T14:18:29,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:29,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/e5fcbb9c729f46049d8bb599e9d5f5a4 is 50, key is test_row_0/A:col10/1730989109212/Put/seqid=0 2024-11-07T14:18:29,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742079_1255 (size=12151) 2024-11-07T14:18:29,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:29,529 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
as already flushing 2024-11-07T14:18:29,582 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:29,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45122 deadline: 1730989169580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:29,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-07T14:18:29,684 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:29,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45122 deadline: 1730989169683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:29,884 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=181 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/e5fcbb9c729f46049d8bb599e9d5f5a4 2024-11-07T14:18:29,888 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:29,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45122 deadline: 1730989169886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:29,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/e434ba63564e489fa98cab6aaf43ff0d is 50, key is test_row_0/B:col10/1730989109212/Put/seqid=0 2024-11-07T14:18:29,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-07T14:18:29,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742080_1256 (size=12151) 2024-11-07T14:18:30,192 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:30,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45122 deadline: 1730989170190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:30,342 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=181 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/e434ba63564e489fa98cab6aaf43ff0d 2024-11-07T14:18:30,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/e556bcf1dcef4bacba05a9b75c144f97 is 50, key is test_row_0/C:col10/1730989109212/Put/seqid=0 2024-11-07T14:18:30,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742081_1257 (size=12151) 2024-11-07T14:18:30,357 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=181 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/e556bcf1dcef4bacba05a9b75c144f97 2024-11-07T14:18:30,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/e5fcbb9c729f46049d8bb599e9d5f5a4 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/e5fcbb9c729f46049d8bb599e9d5f5a4 2024-11-07T14:18:30,366 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/e5fcbb9c729f46049d8bb599e9d5f5a4, entries=150, sequenceid=181, filesize=11.9 K 2024-11-07T14:18:30,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/e434ba63564e489fa98cab6aaf43ff0d as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/e434ba63564e489fa98cab6aaf43ff0d 2024-11-07T14:18:30,371 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/e434ba63564e489fa98cab6aaf43ff0d, entries=150, sequenceid=181, filesize=11.9 K 2024-11-07T14:18:30,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/e556bcf1dcef4bacba05a9b75c144f97 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/e556bcf1dcef4bacba05a9b75c144f97 2024-11-07T14:18:30,376 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/e556bcf1dcef4bacba05a9b75c144f97, entries=150, sequenceid=181, filesize=11.9 K 2024-11-07T14:18:30,377 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for d9f2a49b2fc894490b8d6f930ab5fe35 in 904ms, sequenceid=181, compaction requested=true 2024-11-07T14:18:30,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:30,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:30,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-11-07T14:18:30,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-11-07T14:18:30,379 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-11-07T14:18:30,379 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0580 sec 2024-11-07T14:18:30,381 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 1.0620 sec 2024-11-07T14:18:30,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-07T14:18:30,422 INFO [Thread-1032 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-11-07T14:18:30,424 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:18:30,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-11-07T14:18:30,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-07T14:18:30,425 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:18:30,426 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:18:30,426 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:18:30,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-07T14:18:30,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:30,541 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d9f2a49b2fc894490b8d6f930ab5fe35 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-07T14:18:30,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=A 2024-11-07T14:18:30,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:30,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, 
store=B 2024-11-07T14:18:30,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:30,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=C 2024-11-07T14:18:30,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:30,546 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/85212e46c26c48bbab43d0fb6e7708eb is 50, key is test_row_0/A:col10/1730989110539/Put/seqid=0 2024-11-07T14:18:30,554 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:30,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1730989170551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:30,557 DEBUG [Thread-1026 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4151 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., hostname=69430dbfd73f,45917,1730989044081, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T14:18:30,557 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:30,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989170553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:30,564 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:30,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989170560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:30,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742082_1258 (size=12151) 2024-11-07T14:18:30,573 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:30,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989170571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:30,577 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:30,578 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-07T14:18:30,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:30,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:30,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:30,578 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:30,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:30,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:30,660 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:30,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989170659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:30,666 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:30,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989170665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:30,674 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:30,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989170674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:30,698 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:30,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45122 deadline: 1730989170697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:30,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-07T14:18:30,730 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:30,730 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-07T14:18:30,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:30,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:30,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:30,731 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:30,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:30,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:30,862 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:30,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989170861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:30,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:30,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989170867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:30,878 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:30,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989170876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:30,883 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:30,884 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-07T14:18:30,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:30,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:30,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:30,884 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:30,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:30,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:30,970 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/85212e46c26c48bbab43d0fb6e7708eb 2024-11-07T14:18:30,977 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/3957a2914fa14932b5dbdfd2f8b9b2f5 is 50, key is test_row_0/B:col10/1730989110539/Put/seqid=0 2024-11-07T14:18:30,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742083_1259 (size=12151) 2024-11-07T14:18:31,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-07T14:18:31,036 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:31,036 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-07T14:18:31,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:31,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:31,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:31,037 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:31,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:31,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:31,165 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:31,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989171164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:31,171 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:31,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989171170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:31,182 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:31,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989171181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:31,189 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:31,189 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-07T14:18:31,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:31,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:31,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:31,190 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:31,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:31,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:31,342 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:31,342 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-07T14:18:31,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:31,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
as already flushing 2024-11-07T14:18:31,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:31,343 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:31,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:31,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:31,393 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/3957a2914fa14932b5dbdfd2f8b9b2f5 2024-11-07T14:18:31,401 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/dc070d9c85ff423d9ab7eca23a1d5e1f is 50, key is test_row_0/C:col10/1730989110539/Put/seqid=0 2024-11-07T14:18:31,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742084_1260 (size=12151) 2024-11-07T14:18:31,495 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:31,495 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-07T14:18:31,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:31,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
as already flushing 2024-11-07T14:18:31,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:31,496 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:31,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:31,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:31,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-07T14:18:31,648 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:31,648 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-07T14:18:31,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:31,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:31,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:31,649 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:31,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:31,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:31,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:31,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989171666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:31,676 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:31,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989171675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:31,688 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:31,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989171687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:31,703 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:31,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45122 deadline: 1730989171702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:31,801 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:31,801 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-07T14:18:31,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:31,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:31,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:31,802 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:31,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:31,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:31,819 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/dc070d9c85ff423d9ab7eca23a1d5e1f 2024-11-07T14:18:31,823 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/85212e46c26c48bbab43d0fb6e7708eb as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/85212e46c26c48bbab43d0fb6e7708eb 2024-11-07T14:18:31,827 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/85212e46c26c48bbab43d0fb6e7708eb, entries=150, sequenceid=206, filesize=11.9 K 2024-11-07T14:18:31,828 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/3957a2914fa14932b5dbdfd2f8b9b2f5 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/3957a2914fa14932b5dbdfd2f8b9b2f5 2024-11-07T14:18:31,831 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/3957a2914fa14932b5dbdfd2f8b9b2f5, entries=150, sequenceid=206, filesize=11.9 K 2024-11-07T14:18:31,832 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/dc070d9c85ff423d9ab7eca23a1d5e1f as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/dc070d9c85ff423d9ab7eca23a1d5e1f 2024-11-07T14:18:31,835 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/dc070d9c85ff423d9ab7eca23a1d5e1f, entries=150, sequenceid=206, filesize=11.9 K 2024-11-07T14:18:31,836 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for d9f2a49b2fc894490b8d6f930ab5fe35 in 1296ms, sequenceid=206, compaction requested=true 2024-11-07T14:18:31,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:31,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
d9f2a49b2fc894490b8d6f930ab5fe35:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:18:31,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:31,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d9f2a49b2fc894490b8d6f930ab5fe35:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:18:31,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:31,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d9f2a49b2fc894490b8d6f930ab5fe35:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:18:31,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:18:31,837 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:18:31,837 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:18:31,838 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51336 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:18:31,838 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): d9f2a49b2fc894490b8d6f930ab5fe35/A is initiating minor compaction (all files) 2024-11-07T14:18:31,838 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d9f2a49b2fc894490b8d6f930ab5fe35/A in TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:31,838 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/a56191bc5ff9445faf5d158a278d4f87, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/6af6507f90b04b8cbe0cfc06d3c2183e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/e5fcbb9c729f46049d8bb599e9d5f5a4, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/85212e46c26c48bbab43d0fb6e7708eb] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp, totalSize=50.1 K 2024-11-07T14:18:31,838 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:18:31,838 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting a56191bc5ff9445faf5d158a278d4f87, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=142, earliestPutTs=1730989108893 2024-11-07T14:18:31,838 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): d9f2a49b2fc894490b8d6f930ab5fe35/B is initiating minor compaction (all files) 2024-11-07T14:18:31,838 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d9f2a49b2fc894490b8d6f930ab5fe35/B in TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:31,838 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/259bb8dcd8e14468a5a70e3fda02839a, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/07887b5f20cd4e9e9dfb0cc0893f625c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/e434ba63564e489fa98cab6aaf43ff0d, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/3957a2914fa14932b5dbdfd2f8b9b2f5] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp, totalSize=47.8 K 2024-11-07T14:18:31,839 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6af6507f90b04b8cbe0cfc06d3c2183e, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1730989109053 2024-11-07T14:18:31,839 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 259bb8dcd8e14468a5a70e3fda02839a, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=142, earliestPutTs=1730989108893 2024-11-07T14:18:31,839 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting e5fcbb9c729f46049d8bb599e9d5f5a4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1730989109197 2024-11-07T14:18:31,839 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 07887b5f20cd4e9e9dfb0cc0893f625c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1730989109053 2024-11-07T14:18:31,839 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 85212e46c26c48bbab43d0fb6e7708eb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1730989109576 2024-11-07T14:18:31,840 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting e434ba63564e489fa98cab6aaf43ff0d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1730989109197 2024-11-07T14:18:31,840 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 3957a2914fa14932b5dbdfd2f8b9b2f5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1730989109576 2024-11-07T14:18:31,849 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d9f2a49b2fc894490b8d6f930ab5fe35#A#compaction#215 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:18:31,850 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/a883341030a348ab93a713dcf206ab17 is 50, key is test_row_0/A:col10/1730989110539/Put/seqid=0 2024-11-07T14:18:31,854 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d9f2a49b2fc894490b8d6f930ab5fe35#B#compaction#216 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:18:31,855 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/fe2a4fcaa8a3420c8f76db202072401e is 50, key is test_row_0/B:col10/1730989110539/Put/seqid=0 2024-11-07T14:18:31,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742086_1262 (size=12629) 2024-11-07T14:18:31,890 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/fe2a4fcaa8a3420c8f76db202072401e as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/fe2a4fcaa8a3420c8f76db202072401e 2024-11-07T14:18:31,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742085_1261 (size=12629) 2024-11-07T14:18:31,900 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/a883341030a348ab93a713dcf206ab17 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/a883341030a348ab93a713dcf206ab17 2024-11-07T14:18:31,901 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d9f2a49b2fc894490b8d6f930ab5fe35/B of d9f2a49b2fc894490b8d6f930ab5fe35 into fe2a4fcaa8a3420c8f76db202072401e(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:18:31,901 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:31,901 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., storeName=d9f2a49b2fc894490b8d6f930ab5fe35/B, priority=12, startTime=1730989111836; duration=0sec 2024-11-07T14:18:31,901 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:18:31,901 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9f2a49b2fc894490b8d6f930ab5fe35:B 2024-11-07T14:18:31,901 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:18:31,904 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:18:31,905 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): d9f2a49b2fc894490b8d6f930ab5fe35/C is initiating minor compaction (all files) 2024-11-07T14:18:31,905 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d9f2a49b2fc894490b8d6f930ab5fe35/A of d9f2a49b2fc894490b8d6f930ab5fe35 into a883341030a348ab93a713dcf206ab17(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:18:31,905 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d9f2a49b2fc894490b8d6f930ab5fe35/C in TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:31,906 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:31,906 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., storeName=d9f2a49b2fc894490b8d6f930ab5fe35/A, priority=12, startTime=1730989111836; duration=0sec 2024-11-07T14:18:31,906 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/5841af2d948d461ea71c77485f626650, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/ef4ba4bb0adb413483e8f60580597b5c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/e556bcf1dcef4bacba05a9b75c144f97, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/dc070d9c85ff423d9ab7eca23a1d5e1f] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp, totalSize=47.8 K 2024-11-07T14:18:31,906 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:31,906 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9f2a49b2fc894490b8d6f930ab5fe35:A 2024-11-07T14:18:31,907 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 5841af2d948d461ea71c77485f626650, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=142, earliestPutTs=1730989108893 2024-11-07T14:18:31,907 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting ef4ba4bb0adb413483e8f60580597b5c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1730989109053 2024-11-07T14:18:31,907 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting e556bcf1dcef4bacba05a9b75c144f97, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1730989109197 2024-11-07T14:18:31,908 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting dc070d9c85ff423d9ab7eca23a1d5e1f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1730989109576 2024-11-07T14:18:31,919 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d9f2a49b2fc894490b8d6f930ab5fe35#C#compaction#217 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:18:31,920 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/c5406b16bfc4461584876b824c3d01ed is 50, key is test_row_0/C:col10/1730989110539/Put/seqid=0 2024-11-07T14:18:31,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742087_1263 (size=12629) 2024-11-07T14:18:31,954 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:31,954 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-07T14:18:31,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:31,955 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing d9f2a49b2fc894490b8d6f930ab5fe35 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-07T14:18:31,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=A 2024-11-07T14:18:31,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:31,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=B 2024-11-07T14:18:31,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:31,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=C 2024-11-07T14:18:31,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:31,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/0ccbb08c9b2646d0b07dc4ce02d8c514 is 50, key is test_row_0/A:col10/1730989110553/Put/seqid=0 2024-11-07T14:18:31,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742088_1264 (size=12151) 2024-11-07T14:18:32,331 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/c5406b16bfc4461584876b824c3d01ed as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/c5406b16bfc4461584876b824c3d01ed 2024-11-07T14:18:32,339 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d9f2a49b2fc894490b8d6f930ab5fe35/C of d9f2a49b2fc894490b8d6f930ab5fe35 into c5406b16bfc4461584876b824c3d01ed(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:18:32,339 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:32,339 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., storeName=d9f2a49b2fc894490b8d6f930ab5fe35/C, priority=12, startTime=1730989111836; duration=0sec 2024-11-07T14:18:32,339 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:32,339 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9f2a49b2fc894490b8d6f930ab5fe35:C 2024-11-07T14:18:32,366 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=220 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/0ccbb08c9b2646d0b07dc4ce02d8c514 2024-11-07T14:18:32,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/eeefa918b0d14ac980c1eaee82da307b is 50, key is test_row_0/B:col10/1730989110553/Put/seqid=0 2024-11-07T14:18:32,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742089_1265 (size=12151) 2024-11-07T14:18:32,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-07T14:18:32,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:32,671 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:32,698 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:32,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989172697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:32,698 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:32,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989172697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:32,699 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:32,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989172698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:32,778 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=220 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/eeefa918b0d14ac980c1eaee82da307b 2024-11-07T14:18:32,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/fd903b4b12144a6cb81c7fa220f1d2df is 50, key is test_row_0/C:col10/1730989110553/Put/seqid=0 2024-11-07T14:18:32,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742090_1266 (size=12151) 2024-11-07T14:18:32,801 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:32,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:32,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989172799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:32,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989172799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:33,002 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:33,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989173002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:33,003 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:33,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989173003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:33,193 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=220 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/fd903b4b12144a6cb81c7fa220f1d2df 2024-11-07T14:18:33,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/0ccbb08c9b2646d0b07dc4ce02d8c514 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/0ccbb08c9b2646d0b07dc4ce02d8c514 2024-11-07T14:18:33,201 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/0ccbb08c9b2646d0b07dc4ce02d8c514, entries=150, sequenceid=220, filesize=11.9 K 2024-11-07T14:18:33,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/eeefa918b0d14ac980c1eaee82da307b as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/eeefa918b0d14ac980c1eaee82da307b 2024-11-07T14:18:33,206 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/eeefa918b0d14ac980c1eaee82da307b, entries=150, sequenceid=220, filesize=11.9 K 2024-11-07T14:18:33,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/fd903b4b12144a6cb81c7fa220f1d2df as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/fd903b4b12144a6cb81c7fa220f1d2df 2024-11-07T14:18:33,211 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/fd903b4b12144a6cb81c7fa220f1d2df, entries=150, sequenceid=220, filesize=11.9 K 2024-11-07T14:18:33,212 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for d9f2a49b2fc894490b8d6f930ab5fe35 in 1257ms, sequenceid=220, compaction requested=false 2024-11-07T14:18:33,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:33,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:33,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-11-07T14:18:33,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-11-07T14:18:33,215 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-11-07T14:18:33,215 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7870 sec 2024-11-07T14:18:33,216 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 2.7910 sec 2024-11-07T14:18:33,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:33,306 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d9f2a49b2fc894490b8d6f930ab5fe35 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-07T14:18:33,308 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=A 2024-11-07T14:18:33,308 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:33,308 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=B 2024-11-07T14:18:33,308 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:33,308 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
d9f2a49b2fc894490b8d6f930ab5fe35, store=C 2024-11-07T14:18:33,308 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:33,312 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/51d4a2dd6dd947ef9b8041c875ac79ff is 50, key is test_row_0/A:col10/1730989112697/Put/seqid=0 2024-11-07T14:18:33,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742091_1267 (size=12151) 2024-11-07T14:18:33,321 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:33,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989173320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:33,322 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:33,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989173320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:33,423 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:33,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989173422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:33,424 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:33,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989173423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:33,626 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:33,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989173625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:33,627 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:33,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989173626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:33,711 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:33,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45122 deadline: 1730989173711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:33,712 DEBUG [Thread-1022 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4133 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., hostname=69430dbfd73f,45917,1730989044081, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T14:18:33,717 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/51d4a2dd6dd947ef9b8041c875ac79ff 2024-11-07T14:18:33,727 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/183d9825e8bb4dc39bbb15180410b5dd is 
50, key is test_row_0/B:col10/1730989112697/Put/seqid=0 2024-11-07T14:18:33,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742092_1268 (size=12151) 2024-11-07T14:18:33,928 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:33,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989173927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:33,931 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:33,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989173930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:34,161 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/183d9825e8bb4dc39bbb15180410b5dd 2024-11-07T14:18:34,170 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/51c7e07d55814aff87b5c32872d835b8 is 50, key is test_row_0/C:col10/1730989112697/Put/seqid=0 2024-11-07T14:18:34,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742093_1269 (size=12151) 2024-11-07T14:18:34,435 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:34,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989174434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:34,438 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:34,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989174436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:34,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-07T14:18:34,530 INFO [Thread-1032 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-11-07T14:18:34,531 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:18:34,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-11-07T14:18:34,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-07T14:18:34,533 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:18:34,533 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:18:34,534 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:18:34,575 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:34,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1730989174573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:34,576 DEBUG [Thread-1026 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8171 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., hostname=69430dbfd73f,45917,1730989044081, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) 
at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T14:18:34,584 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/51c7e07d55814aff87b5c32872d835b8 2024-11-07T14:18:34,589 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/51d4a2dd6dd947ef9b8041c875ac79ff as 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/51d4a2dd6dd947ef9b8041c875ac79ff 2024-11-07T14:18:34,593 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/51d4a2dd6dd947ef9b8041c875ac79ff, entries=150, sequenceid=247, filesize=11.9 K 2024-11-07T14:18:34,594 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/183d9825e8bb4dc39bbb15180410b5dd as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/183d9825e8bb4dc39bbb15180410b5dd 2024-11-07T14:18:34,598 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/183d9825e8bb4dc39bbb15180410b5dd, entries=150, sequenceid=247, filesize=11.9 K 2024-11-07T14:18:34,599 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/51c7e07d55814aff87b5c32872d835b8 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/51c7e07d55814aff87b5c32872d835b8 2024-11-07T14:18:34,604 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/51c7e07d55814aff87b5c32872d835b8, entries=150, sequenceid=247, filesize=11.9 K 2024-11-07T14:18:34,604 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for d9f2a49b2fc894490b8d6f930ab5fe35 in 1298ms, sequenceid=247, compaction requested=true 2024-11-07T14:18:34,605 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:34,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d9f2a49b2fc894490b8d6f930ab5fe35:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:18:34,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:34,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d9f2a49b2fc894490b8d6f930ab5fe35:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:18:34,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:34,605 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 
compacting, 3 eligible, 16 blocking 2024-11-07T14:18:34,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d9f2a49b2fc894490b8d6f930ab5fe35:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:18:34,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:18:34,605 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:18:34,606 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:18:34,606 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): d9f2a49b2fc894490b8d6f930ab5fe35/B is initiating minor compaction (all files) 2024-11-07T14:18:34,606 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d9f2a49b2fc894490b8d6f930ab5fe35/B in TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:34,607 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/fe2a4fcaa8a3420c8f76db202072401e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/eeefa918b0d14ac980c1eaee82da307b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/183d9825e8bb4dc39bbb15180410b5dd] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp, totalSize=36.1 K 2024-11-07T14:18:34,607 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:18:34,607 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): d9f2a49b2fc894490b8d6f930ab5fe35/A is initiating minor compaction (all files) 2024-11-07T14:18:34,607 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d9f2a49b2fc894490b8d6f930ab5fe35/A in TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:34,607 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/a883341030a348ab93a713dcf206ab17, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/0ccbb08c9b2646d0b07dc4ce02d8c514, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/51d4a2dd6dd947ef9b8041c875ac79ff] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp, totalSize=36.1 K 2024-11-07T14:18:34,607 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting fe2a4fcaa8a3420c8f76db202072401e, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1730989109576 2024-11-07T14:18:34,608 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting a883341030a348ab93a713dcf206ab17, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1730989109576 2024-11-07T14:18:34,608 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting eeefa918b0d14ac980c1eaee82da307b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1730989110552 2024-11-07T14:18:34,608 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0ccbb08c9b2646d0b07dc4ce02d8c514, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1730989110552 2024-11-07T14:18:34,609 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 183d9825e8bb4dc39bbb15180410b5dd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1730989112697 2024-11-07T14:18:34,609 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 51d4a2dd6dd947ef9b8041c875ac79ff, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1730989112697 2024-11-07T14:18:34,620 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d9f2a49b2fc894490b8d6f930ab5fe35#A#compaction#224 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:18:34,621 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/66c547102027408fb9e21a9e9a4ee41f is 50, key is test_row_0/A:col10/1730989112697/Put/seqid=0 2024-11-07T14:18:34,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-07T14:18:34,634 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d9f2a49b2fc894490b8d6f930ab5fe35#B#compaction#225 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:18:34,635 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/e83fa3fe418f425a8016d7b03b25c79f is 50, key is test_row_0/B:col10/1730989112697/Put/seqid=0 2024-11-07T14:18:34,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742094_1270 (size=12731) 2024-11-07T14:18:34,651 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/66c547102027408fb9e21a9e9a4ee41f as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/66c547102027408fb9e21a9e9a4ee41f 2024-11-07T14:18:34,656 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d9f2a49b2fc894490b8d6f930ab5fe35/A of d9f2a49b2fc894490b8d6f930ab5fe35 into 66c547102027408fb9e21a9e9a4ee41f(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:18:34,656 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:34,656 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., storeName=d9f2a49b2fc894490b8d6f930ab5fe35/A, priority=13, startTime=1730989114605; duration=0sec 2024-11-07T14:18:34,656 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:18:34,656 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9f2a49b2fc894490b8d6f930ab5fe35:A 2024-11-07T14:18:34,656 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:18:34,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742095_1271 (size=12731) 2024-11-07T14:18:34,658 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:18:34,658 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): d9f2a49b2fc894490b8d6f930ab5fe35/C is initiating minor compaction (all files) 2024-11-07T14:18:34,659 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d9f2a49b2fc894490b8d6f930ab5fe35/C in TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:34,659 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/c5406b16bfc4461584876b824c3d01ed, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/fd903b4b12144a6cb81c7fa220f1d2df, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/51c7e07d55814aff87b5c32872d835b8] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp, totalSize=36.1 K 2024-11-07T14:18:34,659 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting c5406b16bfc4461584876b824c3d01ed, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1730989109576 2024-11-07T14:18:34,660 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd903b4b12144a6cb81c7fa220f1d2df, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1730989110552 2024-11-07T14:18:34,660 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 51c7e07d55814aff87b5c32872d835b8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1730989112697 2024-11-07T14:18:34,663 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/e83fa3fe418f425a8016d7b03b25c79f as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/e83fa3fe418f425a8016d7b03b25c79f 2024-11-07T14:18:34,669 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d9f2a49b2fc894490b8d6f930ab5fe35/B of d9f2a49b2fc894490b8d6f930ab5fe35 into e83fa3fe418f425a8016d7b03b25c79f(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:18:34,669 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:34,669 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., storeName=d9f2a49b2fc894490b8d6f930ab5fe35/B, priority=13, startTime=1730989114605; duration=0sec 2024-11-07T14:18:34,669 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:34,669 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9f2a49b2fc894490b8d6f930ab5fe35:B 2024-11-07T14:18:34,671 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d9f2a49b2fc894490b8d6f930ab5fe35#C#compaction#226 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:18:34,672 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/cdbaf66d99d14aed972e02f6ec57214c is 50, key is test_row_0/C:col10/1730989112697/Put/seqid=0 2024-11-07T14:18:34,685 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:34,686 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-07T14:18:34,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742096_1272 (size=12731) 2024-11-07T14:18:34,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:34,686 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing d9f2a49b2fc894490b8d6f930ab5fe35 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-07T14:18:34,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=A 2024-11-07T14:18:34,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:34,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=B 2024-11-07T14:18:34,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:34,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=C 2024-11-07T14:18:34,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:34,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/addc0c94fb0e426c8bf6fb91f1fb13d6 is 50, key is test_row_0/A:col10/1730989113316/Put/seqid=0 2024-11-07T14:18:34,696 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/cdbaf66d99d14aed972e02f6ec57214c as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/cdbaf66d99d14aed972e02f6ec57214c 2024-11-07T14:18:34,703 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d9f2a49b2fc894490b8d6f930ab5fe35/C of d9f2a49b2fc894490b8d6f930ab5fe35 into cdbaf66d99d14aed972e02f6ec57214c(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:18:34,703 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:34,703 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., storeName=d9f2a49b2fc894490b8d6f930ab5fe35/C, priority=13, startTime=1730989114605; duration=0sec 2024-11-07T14:18:34,703 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:34,703 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9f2a49b2fc894490b8d6f930ab5fe35:C 2024-11-07T14:18:34,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:34,720 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:34,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742097_1273 (size=12201) 2024-11-07T14:18:34,736 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=260 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/addc0c94fb0e426c8bf6fb91f1fb13d6 2024-11-07T14:18:34,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/8fb878b2a74e482e8a78835f28603f2d is 50, key is test_row_0/B:col10/1730989113316/Put/seqid=0 2024-11-07T14:18:34,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742098_1274 (size=12201) 2024-11-07T14:18:34,808 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:34,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989174806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:34,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-07T14:18:34,910 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:34,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989174909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:35,111 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:35,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989175111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:35,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-07T14:18:35,154 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=260 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/8fb878b2a74e482e8a78835f28603f2d 2024-11-07T14:18:35,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/2babf5ea0d7c42aa96ebdec0e6ee01a8 is 50, key is test_row_0/C:col10/1730989113316/Put/seqid=0 2024-11-07T14:18:35,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742099_1275 (size=12201) 2024-11-07T14:18:35,172 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=260 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/2babf5ea0d7c42aa96ebdec0e6ee01a8 2024-11-07T14:18:35,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/addc0c94fb0e426c8bf6fb91f1fb13d6 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/addc0c94fb0e426c8bf6fb91f1fb13d6 2024-11-07T14:18:35,181 INFO 
[RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/addc0c94fb0e426c8bf6fb91f1fb13d6, entries=150, sequenceid=260, filesize=11.9 K 2024-11-07T14:18:35,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/8fb878b2a74e482e8a78835f28603f2d as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/8fb878b2a74e482e8a78835f28603f2d 2024-11-07T14:18:35,185 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/8fb878b2a74e482e8a78835f28603f2d, entries=150, sequenceid=260, filesize=11.9 K 2024-11-07T14:18:35,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/2babf5ea0d7c42aa96ebdec0e6ee01a8 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/2babf5ea0d7c42aa96ebdec0e6ee01a8 2024-11-07T14:18:35,190 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/2babf5ea0d7c42aa96ebdec0e6ee01a8, entries=150, sequenceid=260, filesize=11.9 K 2024-11-07T14:18:35,191 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for d9f2a49b2fc894490b8d6f930ab5fe35 in 505ms, sequenceid=260, compaction requested=false 2024-11-07T14:18:35,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:35,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:35,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-11-07T14:18:35,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-11-07T14:18:35,193 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-11-07T14:18:35,193 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 658 msec 2024-11-07T14:18:35,195 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 663 msec 2024-11-07T14:18:35,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:35,417 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d9f2a49b2fc894490b8d6f930ab5fe35 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-07T14:18:35,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=A 2024-11-07T14:18:35,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:35,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=B 2024-11-07T14:18:35,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:35,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=C 2024-11-07T14:18:35,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:35,422 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/4acb2b1802ab4d488af9f5cdcbada4f7 is 50, key is test_row_0/A:col10/1730989114775/Put/seqid=0 2024-11-07T14:18:35,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742100_1276 (size=12301) 2024-11-07T14:18:35,435 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:35,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989175433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:35,443 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:35,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989175442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:35,446 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:35,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989175445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:35,537 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:35,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989175536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:35,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-07T14:18:35,636 INFO [Thread-1032 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-11-07T14:18:35,638 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:18:35,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-11-07T14:18:35,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-07T14:18:35,639 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:18:35,640 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:18:35,640 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:18:35,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-07T14:18:35,740 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:35,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989175739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:35,791 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:35,792 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-07T14:18:35,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:35,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:35,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:35,792 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:35,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:35,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:35,827 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/4acb2b1802ab4d488af9f5cdcbada4f7 2024-11-07T14:18:35,835 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/c396c46567c2474fbcfa0ff6207c7270 is 50, key is test_row_0/B:col10/1730989114775/Put/seqid=0 2024-11-07T14:18:35,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742101_1277 (size=12301) 2024-11-07T14:18:35,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-07T14:18:35,945 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:35,945 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-07T14:18:35,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:35,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:35,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:35,945 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:35,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:35,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:36,045 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:36,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989176044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:36,099 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:36,099 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-07T14:18:36,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:36,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:36,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:36,100 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:36,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:36,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:36,240 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/c396c46567c2474fbcfa0ff6207c7270 2024-11-07T14:18:36,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-07T14:18:36,252 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:36,252 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-07T14:18:36,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:36,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:36,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:36,252 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:36,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:36,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:36,269 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/1ca8586ecc544d9fa9eee8ca18646b06 is 50, key is test_row_0/C:col10/1730989114775/Put/seqid=0 2024-11-07T14:18:36,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742102_1278 (size=12301) 2024-11-07T14:18:36,404 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:36,405 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-07T14:18:36,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:36,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:36,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:36,405 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:36,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:36,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:36,549 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:36,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989176548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:36,557 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:36,558 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-07T14:18:36,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:36,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:36,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:36,558 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:36,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:36,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:36,679 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/1ca8586ecc544d9fa9eee8ca18646b06 2024-11-07T14:18:36,684 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/4acb2b1802ab4d488af9f5cdcbada4f7 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/4acb2b1802ab4d488af9f5cdcbada4f7 2024-11-07T14:18:36,688 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/4acb2b1802ab4d488af9f5cdcbada4f7, entries=150, sequenceid=287, filesize=12.0 K 2024-11-07T14:18:36,689 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/c396c46567c2474fbcfa0ff6207c7270 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/c396c46567c2474fbcfa0ff6207c7270 2024-11-07T14:18:36,692 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/c396c46567c2474fbcfa0ff6207c7270, entries=150, sequenceid=287, filesize=12.0 K 2024-11-07T14:18:36,693 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/1ca8586ecc544d9fa9eee8ca18646b06 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/1ca8586ecc544d9fa9eee8ca18646b06 2024-11-07T14:18:36,696 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/1ca8586ecc544d9fa9eee8ca18646b06, entries=150, sequenceid=287, filesize=12.0 K 2024-11-07T14:18:36,697 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for d9f2a49b2fc894490b8d6f930ab5fe35 in 1281ms, sequenceid=287, compaction requested=true 2024-11-07T14:18:36,697 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:36,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
d9f2a49b2fc894490b8d6f930ab5fe35:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:18:36,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:36,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d9f2a49b2fc894490b8d6f930ab5fe35:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:18:36,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:36,697 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:18:36,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d9f2a49b2fc894490b8d6f930ab5fe35:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:18:36,697 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:18:36,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:18:36,699 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37233 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:18:36,699 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): d9f2a49b2fc894490b8d6f930ab5fe35/B is initiating minor compaction (all files) 2024-11-07T14:18:36,699 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d9f2a49b2fc894490b8d6f930ab5fe35/B in TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:36,699 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/e83fa3fe418f425a8016d7b03b25c79f, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/8fb878b2a74e482e8a78835f28603f2d, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/c396c46567c2474fbcfa0ff6207c7270] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp, totalSize=36.4 K 2024-11-07T14:18:36,699 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37233 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:18:36,699 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): d9f2a49b2fc894490b8d6f930ab5fe35/A is initiating minor compaction (all files) 2024-11-07T14:18:36,699 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d9f2a49b2fc894490b8d6f930ab5fe35/A in TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:36,699 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting e83fa3fe418f425a8016d7b03b25c79f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1730989112697 2024-11-07T14:18:36,699 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/66c547102027408fb9e21a9e9a4ee41f, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/addc0c94fb0e426c8bf6fb91f1fb13d6, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/4acb2b1802ab4d488af9f5cdcbada4f7] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp, totalSize=36.4 K 2024-11-07T14:18:36,700 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 8fb878b2a74e482e8a78835f28603f2d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1730989113316 2024-11-07T14:18:36,700 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 66c547102027408fb9e21a9e9a4ee41f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1730989112697 2024-11-07T14:18:36,700 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting c396c46567c2474fbcfa0ff6207c7270, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1730989114775 2024-11-07T14:18:36,700 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting addc0c94fb0e426c8bf6fb91f1fb13d6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1730989113316 2024-11-07T14:18:36,701 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4acb2b1802ab4d488af9f5cdcbada4f7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1730989114775 2024-11-07T14:18:36,709 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d9f2a49b2fc894490b8d6f930ab5fe35#B#compaction#233 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:18:36,710 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/d2fff3ad2eb545c5a3945c53df908a32 is 50, key is test_row_0/B:col10/1730989114775/Put/seqid=0 2024-11-07T14:18:36,711 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:36,711 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-07T14:18:36,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:36,712 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing d9f2a49b2fc894490b8d6f930ab5fe35 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-07T14:18:36,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=A 2024-11-07T14:18:36,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:36,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=B 2024-11-07T14:18:36,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:36,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=C 2024-11-07T14:18:36,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:36,715 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d9f2a49b2fc894490b8d6f930ab5fe35#A#compaction#234 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:18:36,716 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/a52ce54bd8fd40dea960152356261193 is 50, key is test_row_0/A:col10/1730989114775/Put/seqid=0 2024-11-07T14:18:36,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742103_1279 (size=12983) 2024-11-07T14:18:36,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/d4f37ae98a594319affcefce34bd118b is 50, key is test_row_0/A:col10/1730989115428/Put/seqid=0 2024-11-07T14:18:36,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-07T14:18:36,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742105_1281 (size=12301) 2024-11-07T14:18:36,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742104_1280 (size=12983) 2024-11-07T14:18:37,133 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/d2fff3ad2eb545c5a3945c53df908a32 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/d2fff3ad2eb545c5a3945c53df908a32 2024-11-07T14:18:37,138 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d9f2a49b2fc894490b8d6f930ab5fe35/B of d9f2a49b2fc894490b8d6f930ab5fe35 into d2fff3ad2eb545c5a3945c53df908a32(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:18:37,138 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:37,138 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., storeName=d9f2a49b2fc894490b8d6f930ab5fe35/B, priority=13, startTime=1730989116697; duration=0sec 2024-11-07T14:18:37,138 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:18:37,138 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9f2a49b2fc894490b8d6f930ab5fe35:B 2024-11-07T14:18:37,139 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:18:37,139 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37233 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:18:37,140 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): d9f2a49b2fc894490b8d6f930ab5fe35/C is initiating minor compaction (all files) 2024-11-07T14:18:37,140 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d9f2a49b2fc894490b8d6f930ab5fe35/C in TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:37,140 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/cdbaf66d99d14aed972e02f6ec57214c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/2babf5ea0d7c42aa96ebdec0e6ee01a8, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/1ca8586ecc544d9fa9eee8ca18646b06] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp, totalSize=36.4 K 2024-11-07T14:18:37,140 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting cdbaf66d99d14aed972e02f6ec57214c, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1730989112697 2024-11-07T14:18:37,141 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 2babf5ea0d7c42aa96ebdec0e6ee01a8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1730989113316 2024-11-07T14:18:37,141 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 1ca8586ecc544d9fa9eee8ca18646b06, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1730989114775 2024-11-07T14:18:37,148 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
d9f2a49b2fc894490b8d6f930ab5fe35#C#compaction#236 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:18:37,149 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/57cebe1a35c844239dc165cea5388bc1 is 50, key is test_row_0/C:col10/1730989114775/Put/seqid=0 2024-11-07T14:18:37,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742106_1282 (size=12983) 2024-11-07T14:18:37,154 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/d4f37ae98a594319affcefce34bd118b 2024-11-07T14:18:37,160 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/a52ce54bd8fd40dea960152356261193 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/a52ce54bd8fd40dea960152356261193 2024-11-07T14:18:37,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/9f42a716392c4c5fbb55c86d0fe7850f is 50, key is test_row_0/B:col10/1730989115428/Put/seqid=0 2024-11-07T14:18:37,167 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d9f2a49b2fc894490b8d6f930ab5fe35/A of d9f2a49b2fc894490b8d6f930ab5fe35 into a52ce54bd8fd40dea960152356261193(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
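The "Committing .tmp/... as ..." entries above show the store-file commit step: both the flusher and the compactor write the new HFile into the region's .tmp directory first and only then move it into the column-family directory. Below is a generic sketch of that write-then-rename pattern using the Hadoop FileSystem API, assuming fs.defaultFS points at the target HDFS; the paths are hypothetical stand-ins, not the ones from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpThenRename {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration(); // assumes fs.defaultFS is set for the cluster
            FileSystem fs = FileSystem.get(conf);

            Path tmp = new Path("/data/default/ExampleTable/region/.tmp/A/newfile"); // hypothetical
            Path dst = new Path("/data/default/ExampleTable/region/A/newfile");      // hypothetical

            try (FSDataOutputStream out = fs.create(tmp)) {
                out.writeBytes("cell data"); // the file is written completely before it becomes visible
            }
            boolean committed = fs.rename(tmp, dst); // single metadata move into the store directory
            System.out.println("committed=" + committed);
        }
    }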
2024-11-07T14:18:37,167 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:37,167 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., storeName=d9f2a49b2fc894490b8d6f930ab5fe35/A, priority=13, startTime=1730989116697; duration=0sec 2024-11-07T14:18:37,167 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:37,167 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9f2a49b2fc894490b8d6f930ab5fe35:A 2024-11-07T14:18:37,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742107_1283 (size=12301) 2024-11-07T14:18:37,171 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/9f42a716392c4c5fbb55c86d0fe7850f 2024-11-07T14:18:37,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/0a8a93f9f32246f4bff213e9b983e618 is 50, key is test_row_0/C:col10/1730989115428/Put/seqid=0 2024-11-07T14:18:37,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742108_1284 (size=12301) 2024-11-07T14:18:37,183 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/0a8a93f9f32246f4bff213e9b983e618 2024-11-07T14:18:37,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/d4f37ae98a594319affcefce34bd118b as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/d4f37ae98a594319affcefce34bd118b 2024-11-07T14:18:37,192 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/d4f37ae98a594319affcefce34bd118b, entries=150, sequenceid=297, filesize=12.0 K 2024-11-07T14:18:37,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 
{event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/9f42a716392c4c5fbb55c86d0fe7850f as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/9f42a716392c4c5fbb55c86d0fe7850f 2024-11-07T14:18:37,197 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/9f42a716392c4c5fbb55c86d0fe7850f, entries=150, sequenceid=297, filesize=12.0 K 2024-11-07T14:18:37,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/0a8a93f9f32246f4bff213e9b983e618 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/0a8a93f9f32246f4bff213e9b983e618 2024-11-07T14:18:37,202 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/0a8a93f9f32246f4bff213e9b983e618, entries=150, sequenceid=297, filesize=12.0 K 2024-11-07T14:18:37,203 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=0 B/0 for d9f2a49b2fc894490b8d6f930ab5fe35 in 492ms, sequenceid=297, compaction requested=false 2024-11-07T14:18:37,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:37,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
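The flush that just finished (pid=80) was one FlushRegionProcedure spawned by the table-level FlushTableProcedure (pid=79) that the master is repeatedly polled about in the "Checking to see if procedure is done" entries. A client can request the same table-wide flush through the Admin API; the sketch below assumes an hbase-site.xml on the classpath and uses the table name from this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // Requests a flush of every region of the table; the master runs it as a
                // FlushTableProcedure with one FlushRegionProcedure per region, matching the
                // pid=79/pid=80 entries in the log above.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }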
2024-11-07T14:18:37,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-11-07T14:18:37,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-11-07T14:18:37,206 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-11-07T14:18:37,206 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5650 sec 2024-11-07T14:18:37,209 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 1.5690 sec 2024-11-07T14:18:37,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:37,468 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d9f2a49b2fc894490b8d6f930ab5fe35 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-07T14:18:37,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=A 2024-11-07T14:18:37,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:37,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=B 2024-11-07T14:18:37,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:37,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=C 2024-11-07T14:18:37,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:37,482 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/498224edbdb04fef8b6148366b2513b4 is 50, key is test_row_0/A:col10/1730989117466/Put/seqid=0 2024-11-07T14:18:37,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742109_1285 (size=12301) 2024-11-07T14:18:37,487 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/498224edbdb04fef8b6148366b2513b4 2024-11-07T14:18:37,494 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/f124a7538e2d40dfb95d46d841227051 is 50, key is test_row_0/B:col10/1730989117466/Put/seqid=0 2024-11-07T14:18:37,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742110_1286 
(size=12301) 2024-11-07T14:18:37,499 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/f124a7538e2d40dfb95d46d841227051 2024-11-07T14:18:37,506 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/f0b51ab933ec4854a36f00bc97baaed1 is 50, key is test_row_0/C:col10/1730989117466/Put/seqid=0 2024-11-07T14:18:37,507 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:37,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989177504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:37,507 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:37,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989177505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:37,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742111_1287 (size=12301) 2024-11-07T14:18:37,514 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/f0b51ab933ec4854a36f00bc97baaed1 2024-11-07T14:18:37,518 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/498224edbdb04fef8b6148366b2513b4 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/498224edbdb04fef8b6148366b2513b4 2024-11-07T14:18:37,523 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/498224edbdb04fef8b6148366b2513b4, entries=150, sequenceid=311, filesize=12.0 K 2024-11-07T14:18:37,525 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/f124a7538e2d40dfb95d46d841227051 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/f124a7538e2d40dfb95d46d841227051 2024-11-07T14:18:37,529 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/f124a7538e2d40dfb95d46d841227051, entries=150, sequenceid=311, filesize=12.0 K 2024-11-07T14:18:37,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/f0b51ab933ec4854a36f00bc97baaed1 as 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/f0b51ab933ec4854a36f00bc97baaed1 2024-11-07T14:18:37,534 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/f0b51ab933ec4854a36f00bc97baaed1, entries=150, sequenceid=311, filesize=12.0 K 2024-11-07T14:18:37,535 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for d9f2a49b2fc894490b8d6f930ab5fe35 in 67ms, sequenceid=311, compaction requested=true 2024-11-07T14:18:37,535 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:37,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d9f2a49b2fc894490b8d6f930ab5fe35:A, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:18:37,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:37,535 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:18:37,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d9f2a49b2fc894490b8d6f930ab5fe35:B, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:18:37,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:18:37,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d9f2a49b2fc894490b8d6f930ab5fe35:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:18:37,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-07T14:18:37,536 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:18:37,537 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): d9f2a49b2fc894490b8d6f930ab5fe35/A is initiating minor compaction (all files) 2024-11-07T14:18:37,537 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d9f2a49b2fc894490b8d6f930ab5fe35/A in TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:37,537 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/a52ce54bd8fd40dea960152356261193, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/d4f37ae98a594319affcefce34bd118b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/498224edbdb04fef8b6148366b2513b4] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp, totalSize=36.7 K 2024-11-07T14:18:37,537 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting a52ce54bd8fd40dea960152356261193, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1730989114775 2024-11-07T14:18:37,538 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting d4f37ae98a594319affcefce34bd118b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1730989115426 2024-11-07T14:18:37,538 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 498224edbdb04fef8b6148366b2513b4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1730989117460 2024-11-07T14:18:37,546 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d9f2a49b2fc894490b8d6f930ab5fe35#A#compaction#242 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:18:37,547 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/bc0eceb315ad4c6d953785bbfdea8711 is 50, key is test_row_0/A:col10/1730989117466/Put/seqid=0 2024-11-07T14:18:37,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742112_1288 (size=13085) 2024-11-07T14:18:37,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:37,558 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d9f2a49b2fc894490b8d6f930ab5fe35 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-07T14:18:37,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=A 2024-11-07T14:18:37,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:37,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=B 2024-11-07T14:18:37,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:37,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=C 2024-11-07T14:18:37,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:37,559 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/bc0eceb315ad4c6d953785bbfdea8711 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/bc0eceb315ad4c6d953785bbfdea8711 2024-11-07T14:18:37,559 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/57cebe1a35c844239dc165cea5388bc1 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/57cebe1a35c844239dc165cea5388bc1 2024-11-07T14:18:37,565 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/f2006c5494d3476fb876bf2e20d672bf is 50, key is test_row_0/A:col10/1730989117495/Put/seqid=0 2024-11-07T14:18:37,568 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d9f2a49b2fc894490b8d6f930ab5fe35/C of d9f2a49b2fc894490b8d6f930ab5fe35 into 57cebe1a35c844239dc165cea5388bc1(size=12.7 K), total size for store is 36.7 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:18:37,568 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:37,568 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., storeName=d9f2a49b2fc894490b8d6f930ab5fe35/C, priority=13, startTime=1730989116697; duration=0sec 2024-11-07T14:18:37,568 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-07T14:18:37,568 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9f2a49b2fc894490b8d6f930ab5fe35:C 2024-11-07T14:18:37,568 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9f2a49b2fc894490b8d6f930ab5fe35:C 2024-11-07T14:18:37,568 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:18:37,570 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:18:37,570 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): d9f2a49b2fc894490b8d6f930ab5fe35/B is initiating minor compaction (all files) 2024-11-07T14:18:37,570 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d9f2a49b2fc894490b8d6f930ab5fe35/B in TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:37,570 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/d2fff3ad2eb545c5a3945c53df908a32, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/9f42a716392c4c5fbb55c86d0fe7850f, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/f124a7538e2d40dfb95d46d841227051] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp, totalSize=36.7 K 2024-11-07T14:18:37,571 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting d2fff3ad2eb545c5a3945c53df908a32, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1730989114775 2024-11-07T14:18:37,571 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d9f2a49b2fc894490b8d6f930ab5fe35/A of d9f2a49b2fc894490b8d6f930ab5fe35 into bc0eceb315ad4c6d953785bbfdea8711(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:18:37,571 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:37,571 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., storeName=d9f2a49b2fc894490b8d6f930ab5fe35/A, priority=13, startTime=1730989117535; duration=0sec 2024-11-07T14:18:37,571 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:18:37,571 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9f2a49b2fc894490b8d6f930ab5fe35:A 2024-11-07T14:18:37,571 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:18:37,571 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f42a716392c4c5fbb55c86d0fe7850f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1730989115426 2024-11-07T14:18:37,572 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting f124a7538e2d40dfb95d46d841227051, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1730989117460 2024-11-07T14:18:37,573 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:18:37,573 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): d9f2a49b2fc894490b8d6f930ab5fe35/C is initiating minor compaction (all files) 2024-11-07T14:18:37,573 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d9f2a49b2fc894490b8d6f930ab5fe35/C in TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
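The "Exploring compaction algorithm has selected 3 files" entries come from the default exploring compaction policy, which waits until at least hbase.hstore.compaction.min eligible files exist in a store and then picks the best group it can find. The sketch below shows the usual selection knobs and an explicit compaction request via the Admin API; the threshold values are illustrative assumptions, not what this test configured.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactionExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Selection thresholds consulted by the exploring policy (values are illustrative).
            conf.setInt("hbase.hstore.compaction.min", 3);   // minimum files before a minor compaction
            conf.setInt("hbase.hstore.compaction.max", 10);  // maximum files per compaction

            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // Queues a (minor) compaction for the table; the region server's CompactSplit
                // threads seen in the log pick it up asynchronously.
                admin.compact(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }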
2024-11-07T14:18:37,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742113_1289 (size=12301) 2024-11-07T14:18:37,573 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/57cebe1a35c844239dc165cea5388bc1, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/0a8a93f9f32246f4bff213e9b983e618, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/f0b51ab933ec4854a36f00bc97baaed1] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp, totalSize=36.7 K 2024-11-07T14:18:37,574 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 57cebe1a35c844239dc165cea5388bc1, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1730989114775 2024-11-07T14:18:37,574 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/f2006c5494d3476fb876bf2e20d672bf 2024-11-07T14:18:37,574 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0a8a93f9f32246f4bff213e9b983e618, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1730989115426 2024-11-07T14:18:37,575 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting f0b51ab933ec4854a36f00bc97baaed1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1730989117460 2024-11-07T14:18:37,586 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d9f2a49b2fc894490b8d6f930ab5fe35#B#compaction#244 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:18:37,587 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/a09e66f8ed704b0086d1385444ee0c6e is 50, key is test_row_0/B:col10/1730989117466/Put/seqid=0 2024-11-07T14:18:37,589 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/efea7677657b4c8084e5be44c2c80251 is 50, key is test_row_0/B:col10/1730989117495/Put/seqid=0 2024-11-07T14:18:37,590 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d9f2a49b2fc894490b8d6f930ab5fe35#C#compaction#246 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:18:37,590 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:37,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989177589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:37,592 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/576e9078dd8146118ed21f27949e71bf is 50, key is test_row_0/C:col10/1730989117466/Put/seqid=0 2024-11-07T14:18:37,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742115_1291 (size=13085) 2024-11-07T14:18:37,610 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:37,610 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:37,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989177608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:37,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989177609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:37,615 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/a09e66f8ed704b0086d1385444ee0c6e as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/a09e66f8ed704b0086d1385444ee0c6e 2024-11-07T14:18:37,624 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d9f2a49b2fc894490b8d6f930ab5fe35/B of 
d9f2a49b2fc894490b8d6f930ab5fe35 into a09e66f8ed704b0086d1385444ee0c6e(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:18:37,624 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:37,624 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., storeName=d9f2a49b2fc894490b8d6f930ab5fe35/B, priority=13, startTime=1730989117535; duration=0sec 2024-11-07T14:18:37,624 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:37,624 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9f2a49b2fc894490b8d6f930ab5fe35:B 2024-11-07T14:18:37,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742114_1290 (size=12301) 2024-11-07T14:18:37,630 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/efea7677657b4c8084e5be44c2c80251 2024-11-07T14:18:37,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742116_1292 (size=13085) 2024-11-07T14:18:37,646 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/d44392e9b3684629b01f9550111c1126 is 50, key is test_row_0/C:col10/1730989117495/Put/seqid=0 2024-11-07T14:18:37,658 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/576e9078dd8146118ed21f27949e71bf as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/576e9078dd8146118ed21f27949e71bf 2024-11-07T14:18:37,665 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d9f2a49b2fc894490b8d6f930ab5fe35/C of d9f2a49b2fc894490b8d6f930ab5fe35 into 576e9078dd8146118ed21f27949e71bf(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
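The RegionTooBusyException warnings are write backpressure: once the region's memstore grows past its blocking limit (512 KB here, far below production defaults), puts are rejected and the client retries with backoff, which is what the "tries=7, retries=16" RpcRetryingCallerImpl entry further down records. Below is a minimal sketch of a writer in the same shape as the test's per-row update of families A, B and C that simply relies on that client-side retry; the row key and qualifier match the log, while the retry settings are illustrative assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackpressureAwareWriter {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Client-side retry budget; RegionTooBusyException is retried with backoff by
            // RpcRetryingCallerImpl until this count is exhausted (values are illustrative).
            conf.setInt("hbase.client.retries.number", 16);
            conf.setLong("hbase.client.pause", 100);

            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                byte[] value = Bytes.toBytes("value");
                // One cell per column family, matching the A:col10 / B:col10 / C:col10 cells in the log.
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
                put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
                put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
                table.put(put); // retried internally; throws only after the retry budget is spent
            }
        }
    }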
2024-11-07T14:18:37,665 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:37,665 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., storeName=d9f2a49b2fc894490b8d6f930ab5fe35/C, priority=13, startTime=1730989117535; duration=0sec 2024-11-07T14:18:37,665 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:37,665 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9f2a49b2fc894490b8d6f930ab5fe35:C 2024-11-07T14:18:37,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742117_1293 (size=12301) 2024-11-07T14:18:37,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:37,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989177692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:37,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-07T14:18:37,744 INFO [Thread-1032 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-11-07T14:18:37,745 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:18:37,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-11-07T14:18:37,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-07T14:18:37,748 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:18:37,749 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:18:37,749 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:18:37,753 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:37,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45122 deadline: 1730989177753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:37,754 DEBUG [Thread-1022 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8175 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., hostname=69430dbfd73f,45917,1730989044081, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T14:18:37,812 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:37,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989177811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:37,813 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:37,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989177811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:37,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-07T14:18:37,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:37,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989177895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:37,901 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:37,901 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-07T14:18:37,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:37,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:37,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:37,902 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:37,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:37,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:38,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-07T14:18:38,054 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:38,054 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-07T14:18:38,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:38,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:38,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:38,055 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:38,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:38,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:38,094 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/d44392e9b3684629b01f9550111c1126 2024-11-07T14:18:38,099 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/f2006c5494d3476fb876bf2e20d672bf as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/f2006c5494d3476fb876bf2e20d672bf 2024-11-07T14:18:38,102 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/f2006c5494d3476fb876bf2e20d672bf, entries=150, sequenceid=337, filesize=12.0 K 2024-11-07T14:18:38,103 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/efea7677657b4c8084e5be44c2c80251 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/efea7677657b4c8084e5be44c2c80251 2024-11-07T14:18:38,107 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/efea7677657b4c8084e5be44c2c80251, entries=150, sequenceid=337, filesize=12.0 K 2024-11-07T14:18:38,107 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/d44392e9b3684629b01f9550111c1126 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/d44392e9b3684629b01f9550111c1126 2024-11-07T14:18:38,111 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/d44392e9b3684629b01f9550111c1126, entries=150, sequenceid=337, filesize=12.0 K 2024-11-07T14:18:38,112 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for d9f2a49b2fc894490b8d6f930ab5fe35 in 554ms, sequenceid=337, compaction requested=false 2024-11-07T14:18:38,112 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:38,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:38,115 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d9f2a49b2fc894490b8d6f930ab5fe35 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 
2024-11-07T14:18:38,115 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=A 2024-11-07T14:18:38,115 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:38,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=B 2024-11-07T14:18:38,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:38,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=C 2024-11-07T14:18:38,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:38,120 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/74d649eb41244e81bd3ba12c72054a3d is 50, key is test_row_1/A:col10/1730989118114/Put/seqid=0 2024-11-07T14:18:38,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742118_1294 (size=9857) 2024-11-07T14:18:38,127 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=352 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/74d649eb41244e81bd3ba12c72054a3d 2024-11-07T14:18:38,135 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/4ee1b6b63c1246299b5eb25366c05a35 is 50, key is test_row_1/B:col10/1730989118114/Put/seqid=0 2024-11-07T14:18:38,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742119_1295 (size=9857) 2024-11-07T14:18:38,148 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:38,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989178146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:38,150 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:38,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989178148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:38,202 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:38,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989178200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:38,207 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:38,208 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-07T14:18:38,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:38,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:38,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:38,208 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:38,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:38,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:38,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:38,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989178249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:38,252 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:38,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989178250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:38,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-07T14:18:38,360 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:38,361 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-07T14:18:38,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:38,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:38,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:38,361 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:38,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:38,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:38,453 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:38,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989178452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:38,455 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:38,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989178454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:38,514 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:38,514 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-07T14:18:38,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:38,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:38,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:38,514 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:38,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:38,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:38,540 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=352 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/4ee1b6b63c1246299b5eb25366c05a35 2024-11-07T14:18:38,547 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/c39a9997f04b4f0ba1ecc1c2af0bf70e is 50, key is test_row_1/C:col10/1730989118114/Put/seqid=0 2024-11-07T14:18:38,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742120_1296 (size=9857) 2024-11-07T14:18:38,666 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:38,667 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-07T14:18:38,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:38,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:38,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:38,667 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:38,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:38,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:38,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:38,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989178706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:38,757 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:38,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989178756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:38,759 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:38,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989178758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:38,819 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:38,820 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-07T14:18:38,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:38,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:38,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:38,820 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:38,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:38,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:38,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-07T14:18:38,960 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=352 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/c39a9997f04b4f0ba1ecc1c2af0bf70e 2024-11-07T14:18:38,964 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/74d649eb41244e81bd3ba12c72054a3d as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/74d649eb41244e81bd3ba12c72054a3d 2024-11-07T14:18:38,968 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/74d649eb41244e81bd3ba12c72054a3d, entries=100, sequenceid=352, filesize=9.6 K 2024-11-07T14:18:38,969 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/4ee1b6b63c1246299b5eb25366c05a35 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/4ee1b6b63c1246299b5eb25366c05a35 2024-11-07T14:18:38,972 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/4ee1b6b63c1246299b5eb25366c05a35, entries=100, sequenceid=352, filesize=9.6 K 2024-11-07T14:18:38,973 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:38,973 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-07T14:18:38,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:38,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:38,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:38,974 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:38,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:38,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:38,975 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/c39a9997f04b4f0ba1ecc1c2af0bf70e as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/c39a9997f04b4f0ba1ecc1c2af0bf70e 2024-11-07T14:18:38,979 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/c39a9997f04b4f0ba1ecc1c2af0bf70e, entries=100, sequenceid=352, filesize=9.6 K 2024-11-07T14:18:38,980 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for d9f2a49b2fc894490b8d6f930ab5fe35 in 865ms, sequenceid=352, compaction requested=true 2024-11-07T14:18:38,980 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:38,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d9f2a49b2fc894490b8d6f930ab5fe35:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:18:38,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:38,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d9f2a49b2fc894490b8d6f930ab5fe35:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:18:38,980 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:18:38,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:38,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d9f2a49b2fc894490b8d6f930ab5fe35:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:18:38,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:18:38,980 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:18:38,981 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35243 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:18:38,981 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35243 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:18:38,981 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): d9f2a49b2fc894490b8d6f930ab5fe35/B is initiating minor compaction (all files) 2024-11-07T14:18:38,981 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): d9f2a49b2fc894490b8d6f930ab5fe35/A is initiating minor compaction (all files) 2024-11-07T14:18:38,981 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d9f2a49b2fc894490b8d6f930ab5fe35/B in TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:38,981 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d9f2a49b2fc894490b8d6f930ab5fe35/A in TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:38,981 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/a09e66f8ed704b0086d1385444ee0c6e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/efea7677657b4c8084e5be44c2c80251, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/4ee1b6b63c1246299b5eb25366c05a35] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp, totalSize=34.4 K 2024-11-07T14:18:38,981 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/bc0eceb315ad4c6d953785bbfdea8711, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/f2006c5494d3476fb876bf2e20d672bf, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/74d649eb41244e81bd3ba12c72054a3d] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp, totalSize=34.4 K 2024-11-07T14:18:38,982 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting a09e66f8ed704b0086d1385444ee0c6e, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1730989117460 2024-11-07T14:18:38,982 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting bc0eceb315ad4c6d953785bbfdea8711, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1730989117460 2024-11-07T14:18:38,982 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting efea7677657b4c8084e5be44c2c80251, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1730989117495 2024-11-07T14:18:38,982 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting f2006c5494d3476fb876bf2e20d672bf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1730989117495 2024-11-07T14:18:38,983 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 74d649eb41244e81bd3ba12c72054a3d, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1730989117587 2024-11-07T14:18:38,983 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ee1b6b63c1246299b5eb25366c05a35, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1730989117587 2024-11-07T14:18:38,990 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d9f2a49b2fc894490b8d6f930ab5fe35#A#compaction#251 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:18:38,990 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d9f2a49b2fc894490b8d6f930ab5fe35#B#compaction#252 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:18:38,991 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/1f80a702abf74733bd50c1da8cf90749 is 50, key is test_row_0/B:col10/1730989117495/Put/seqid=0 2024-11-07T14:18:38,991 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/c1beaeec70a04672beadf2fca2404f1d is 50, key is test_row_0/A:col10/1730989117495/Put/seqid=0 2024-11-07T14:18:39,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742122_1298 (size=13187) 2024-11-07T14:18:39,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742121_1297 (size=13187) 2024-11-07T14:18:39,010 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/c1beaeec70a04672beadf2fca2404f1d as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/c1beaeec70a04672beadf2fca2404f1d 2024-11-07T14:18:39,010 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/1f80a702abf74733bd50c1da8cf90749 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/1f80a702abf74733bd50c1da8cf90749 2024-11-07T14:18:39,015 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d9f2a49b2fc894490b8d6f930ab5fe35/A of d9f2a49b2fc894490b8d6f930ab5fe35 into c1beaeec70a04672beadf2fca2404f1d(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:18:39,015 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:39,015 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., storeName=d9f2a49b2fc894490b8d6f930ab5fe35/A, priority=13, startTime=1730989118980; duration=0sec 2024-11-07T14:18:39,015 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:18:39,015 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9f2a49b2fc894490b8d6f930ab5fe35:A 2024-11-07T14:18:39,015 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:18:39,016 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d9f2a49b2fc894490b8d6f930ab5fe35/B of d9f2a49b2fc894490b8d6f930ab5fe35 into 1f80a702abf74733bd50c1da8cf90749(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:18:39,016 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:39,016 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., storeName=d9f2a49b2fc894490b8d6f930ab5fe35/B, priority=13, startTime=1730989118980; duration=0sec 2024-11-07T14:18:39,017 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35243 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:18:39,017 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:39,017 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9f2a49b2fc894490b8d6f930ab5fe35:B 2024-11-07T14:18:39,017 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): d9f2a49b2fc894490b8d6f930ab5fe35/C is initiating minor compaction (all files) 2024-11-07T14:18:39,017 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d9f2a49b2fc894490b8d6f930ab5fe35/C in TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:39,017 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/576e9078dd8146118ed21f27949e71bf, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/d44392e9b3684629b01f9550111c1126, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/c39a9997f04b4f0ba1ecc1c2af0bf70e] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp, totalSize=34.4 K 2024-11-07T14:18:39,017 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 576e9078dd8146118ed21f27949e71bf, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1730989117460 2024-11-07T14:18:39,018 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting d44392e9b3684629b01f9550111c1126, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1730989117495 2024-11-07T14:18:39,018 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting c39a9997f04b4f0ba1ecc1c2af0bf70e, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1730989117587 2024-11-07T14:18:39,025 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d9f2a49b2fc894490b8d6f930ab5fe35#C#compaction#253 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:18:39,026 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/15b14a8e95b54b06a38b57dd7927207a is 50, key is test_row_0/C:col10/1730989117495/Put/seqid=0 2024-11-07T14:18:39,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742123_1299 (size=13187) 2024-11-07T14:18:39,126 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:39,127 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-07T14:18:39,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:39,127 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing d9f2a49b2fc894490b8d6f930ab5fe35 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-07T14:18:39,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=A 2024-11-07T14:18:39,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:39,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=B 2024-11-07T14:18:39,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:39,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=C 2024-11-07T14:18:39,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:39,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/67cf5ff9aea64a99812d427bb44ffa65 is 50, key is test_row_0/A:col10/1730989118144/Put/seqid=0 2024-11-07T14:18:39,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742124_1300 (size=12301) 2024-11-07T14:18:39,262 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:39,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:39,295 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:39,295 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:39,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989179292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:39,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989179293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:39,397 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:39,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989179396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:39,397 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:39,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989179396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:39,435 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/15b14a8e95b54b06a38b57dd7927207a as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/15b14a8e95b54b06a38b57dd7927207a 2024-11-07T14:18:39,440 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d9f2a49b2fc894490b8d6f930ab5fe35/C of d9f2a49b2fc894490b8d6f930ab5fe35 into 15b14a8e95b54b06a38b57dd7927207a(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:18:39,440 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:39,440 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., storeName=d9f2a49b2fc894490b8d6f930ab5fe35/C, priority=13, startTime=1730989118980; duration=0sec 2024-11-07T14:18:39,440 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:39,440 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9f2a49b2fc894490b8d6f930ab5fe35:C 2024-11-07T14:18:39,537 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=379 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/67cf5ff9aea64a99812d427bb44ffa65 2024-11-07T14:18:39,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/0b6649e92315431bb54c697ff4ee727d is 50, key is test_row_0/B:col10/1730989118144/Put/seqid=0 2024-11-07T14:18:39,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742125_1301 (size=12301) 2024-11-07T14:18:39,599 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:39,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989179598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:39,601 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:39,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989179599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:39,712 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:39,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989179711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:39,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-07T14:18:39,901 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:39,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989179900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:39,903 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:39,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989179902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:39,949 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=379 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/0b6649e92315431bb54c697ff4ee727d 2024-11-07T14:18:39,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/0207402a6a7449759f739876a539d773 is 50, key is test_row_0/C:col10/1730989118144/Put/seqid=0 2024-11-07T14:18:39,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742126_1302 (size=12301) 2024-11-07T14:18:40,362 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=379 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/0207402a6a7449759f739876a539d773 2024-11-07T14:18:40,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/67cf5ff9aea64a99812d427bb44ffa65 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/67cf5ff9aea64a99812d427bb44ffa65 2024-11-07T14:18:40,371 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/67cf5ff9aea64a99812d427bb44ffa65, entries=150, sequenceid=379, filesize=12.0 K 2024-11-07T14:18:40,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/0b6649e92315431bb54c697ff4ee727d as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/0b6649e92315431bb54c697ff4ee727d 2024-11-07T14:18:40,375 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/0b6649e92315431bb54c697ff4ee727d, entries=150, sequenceid=379, filesize=12.0 K 2024-11-07T14:18:40,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/0207402a6a7449759f739876a539d773 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/0207402a6a7449759f739876a539d773 2024-11-07T14:18:40,384 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/0207402a6a7449759f739876a539d773, entries=150, sequenceid=379, filesize=12.0 K 2024-11-07T14:18:40,385 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for d9f2a49b2fc894490b8d6f930ab5fe35 in 1258ms, sequenceid=379, compaction requested=false 2024-11-07T14:18:40,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:40,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:40,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-11-07T14:18:40,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-11-07T14:18:40,388 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-11-07T14:18:40,388 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6370 sec 2024-11-07T14:18:40,390 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 2.6440 sec 2024-11-07T14:18:40,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:40,407 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d9f2a49b2fc894490b8d6f930ab5fe35 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-07T14:18:40,407 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=A 2024-11-07T14:18:40,407 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:40,407 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=B 2024-11-07T14:18:40,407 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:40,407 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=C 2024-11-07T14:18:40,407 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:40,412 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/824beedefa954821bd82124e0a1ab544 is 50, key is test_row_0/A:col10/1730989119292/Put/seqid=0 2024-11-07T14:18:40,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742127_1303 (size=12301) 2024-11-07T14:18:40,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:40,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989180430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:40,433 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:40,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989180432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:40,534 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:40,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989180533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:40,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:40,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989180534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:40,736 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:40,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989180735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:40,738 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:40,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989180737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:40,817 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=393 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/824beedefa954821bd82124e0a1ab544 2024-11-07T14:18:40,825 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/c8f5c50e1aed4091920dbeba11e35929 is 50, key is test_row_0/B:col10/1730989119292/Put/seqid=0 2024-11-07T14:18:40,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742128_1304 (size=12301) 2024-11-07T14:18:41,038 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:41,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989181037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:41,040 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:41,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989181039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:41,229 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=393 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/c8f5c50e1aed4091920dbeba11e35929 2024-11-07T14:18:41,242 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/f233faac114b4e75848c35bd90236bcb is 50, key is test_row_0/C:col10/1730989119292/Put/seqid=0 2024-11-07T14:18:41,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742129_1305 (size=12301) 2024-11-07T14:18:41,540 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:41,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989181539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:41,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:41,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989181543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:41,647 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=393 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/f233faac114b4e75848c35bd90236bcb 2024-11-07T14:18:41,651 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/824beedefa954821bd82124e0a1ab544 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/824beedefa954821bd82124e0a1ab544 2024-11-07T14:18:41,655 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/824beedefa954821bd82124e0a1ab544, entries=150, sequenceid=393, filesize=12.0 K 2024-11-07T14:18:41,656 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/c8f5c50e1aed4091920dbeba11e35929 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/c8f5c50e1aed4091920dbeba11e35929 2024-11-07T14:18:41,659 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/c8f5c50e1aed4091920dbeba11e35929, entries=150, sequenceid=393, filesize=12.0 K 2024-11-07T14:18:41,660 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/f233faac114b4e75848c35bd90236bcb as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/f233faac114b4e75848c35bd90236bcb 2024-11-07T14:18:41,664 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/f233faac114b4e75848c35bd90236bcb, entries=150, sequenceid=393, filesize=12.0 K 2024-11-07T14:18:41,665 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for d9f2a49b2fc894490b8d6f930ab5fe35 in 1258ms, sequenceid=393, compaction requested=true 2024-11-07T14:18:41,665 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:41,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d9f2a49b2fc894490b8d6f930ab5fe35:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:18:41,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:41,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d9f2a49b2fc894490b8d6f930ab5fe35:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:18:41,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:41,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d9f2a49b2fc894490b8d6f930ab5fe35:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:18:41,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:18:41,665 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:18:41,665 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:18:41,667 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:18:41,667 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:18:41,667 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): d9f2a49b2fc894490b8d6f930ab5fe35/B is initiating minor compaction (all files) 2024-11-07T14:18:41,667 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): d9f2a49b2fc894490b8d6f930ab5fe35/A is initiating minor compaction (all files) 2024-11-07T14:18:41,667 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d9f2a49b2fc894490b8d6f930ab5fe35/B in TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:41,667 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d9f2a49b2fc894490b8d6f930ab5fe35/A in TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:41,667 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/1f80a702abf74733bd50c1da8cf90749, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/0b6649e92315431bb54c697ff4ee727d, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/c8f5c50e1aed4091920dbeba11e35929] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp, totalSize=36.9 K 2024-11-07T14:18:41,667 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/c1beaeec70a04672beadf2fca2404f1d, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/67cf5ff9aea64a99812d427bb44ffa65, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/824beedefa954821bd82124e0a1ab544] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp, totalSize=36.9 K 2024-11-07T14:18:41,667 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f80a702abf74733bd50c1da8cf90749, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1730989117495 2024-11-07T14:18:41,667 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting c1beaeec70a04672beadf2fca2404f1d, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1730989117495 2024-11-07T14:18:41,668 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b6649e92315431bb54c697ff4ee727d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=379, earliestPutTs=1730989118137 2024-11-07T14:18:41,668 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 67cf5ff9aea64a99812d427bb44ffa65, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=379, earliestPutTs=1730989118137 2024-11-07T14:18:41,668 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 824beedefa954821bd82124e0a1ab544, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1730989119288 2024-11-07T14:18:41,668 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting c8f5c50e1aed4091920dbeba11e35929, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1730989119288 
2024-11-07T14:18:41,677 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d9f2a49b2fc894490b8d6f930ab5fe35#B#compaction#260 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:18:41,678 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/b44ca1a8a1c043dbb43ce506f0429360 is 50, key is test_row_0/B:col10/1730989119292/Put/seqid=0 2024-11-07T14:18:41,678 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d9f2a49b2fc894490b8d6f930ab5fe35#A#compaction#261 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:18:41,679 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/ed9e64dc9f2b473089ef8c49e21de0dd is 50, key is test_row_0/A:col10/1730989119292/Put/seqid=0 2024-11-07T14:18:41,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742130_1306 (size=13289) 2024-11-07T14:18:41,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742131_1307 (size=13289) 2024-11-07T14:18:41,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:41,728 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d9f2a49b2fc894490b8d6f930ab5fe35 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-07T14:18:41,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=A 2024-11-07T14:18:41,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:41,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=B 2024-11-07T14:18:41,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:41,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=C 2024-11-07T14:18:41,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:41,738 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/e441c7f33f45407c9fcd0de9fd0c0161 is 50, key is test_row_0/A:col10/1730989120431/Put/seqid=0 2024-11-07T14:18:41,757 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:41,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989181754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:41,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742132_1308 (size=12301) 2024-11-07T14:18:41,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-07T14:18:41,852 INFO [Thread-1032 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-11-07T14:18:41,854 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:18:41,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-11-07T14:18:41,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-07T14:18:41,855 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:18:41,856 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:18:41,856 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:18:41,860 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:41,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989181859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:41,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-07T14:18:42,008 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:42,008 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-07T14:18:42,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:42,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:42,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:42,009 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:42,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:42,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:42,062 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:42,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989182061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:42,092 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/b44ca1a8a1c043dbb43ce506f0429360 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/b44ca1a8a1c043dbb43ce506f0429360 2024-11-07T14:18:42,099 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d9f2a49b2fc894490b8d6f930ab5fe35/B of d9f2a49b2fc894490b8d6f930ab5fe35 into b44ca1a8a1c043dbb43ce506f0429360(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:18:42,099 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:42,099 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., storeName=d9f2a49b2fc894490b8d6f930ab5fe35/B, priority=13, startTime=1730989121665; duration=0sec 2024-11-07T14:18:42,099 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/ed9e64dc9f2b473089ef8c49e21de0dd as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/ed9e64dc9f2b473089ef8c49e21de0dd 2024-11-07T14:18:42,099 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:18:42,099 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9f2a49b2fc894490b8d6f930ab5fe35:B 2024-11-07T14:18:42,099 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:18:42,100 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:18:42,100 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): d9f2a49b2fc894490b8d6f930ab5fe35/C is initiating minor compaction (all files) 2024-11-07T14:18:42,100 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d9f2a49b2fc894490b8d6f930ab5fe35/C in TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
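The selection logged just above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking", then "Exploring compaction algorithm has selected 3 files of size 37789") is the ratio-based candidate check performed when picking files for a minor compaction. The standalone sketch below illustrates the general rule with an assumed ratio of 1.2 and approximate file sizes; it is not the project's implementation, only an illustration of why three similarly sized ~12 K store files are eligible together.

// Hedged sketch of a "files in ratio" rule for compaction candidates: no file
// in the chosen set may be larger than ratio times the combined size of the
// other files. The sizes and the 1.2 ratio below are illustrative assumptions.
public class RatioCheck {
  static boolean filesInRatio(long[] sizes, double ratio) {
    if (sizes.length < 2) {
      return true; // a single file is trivially in ratio
    }
    long total = 0;
    for (long s : sizes) {
      total += s;
    }
    for (long s : sizes) {
      if (s > (total - s) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    long[] candidate = {13289, 12301, 12301}; // three ~12-13 K store files (approximate)
    System.out.println(filesInRatio(candidate, 1.2)); // true: all three may compact together
  }
}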
2024-11-07T14:18:42,100 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/15b14a8e95b54b06a38b57dd7927207a, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/0207402a6a7449759f739876a539d773, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/f233faac114b4e75848c35bd90236bcb] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp, totalSize=36.9 K 2024-11-07T14:18:42,100 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 15b14a8e95b54b06a38b57dd7927207a, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1730989117495 2024-11-07T14:18:42,101 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 0207402a6a7449759f739876a539d773, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=379, earliestPutTs=1730989118137 2024-11-07T14:18:42,102 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting f233faac114b4e75848c35bd90236bcb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1730989119288 2024-11-07T14:18:42,104 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d9f2a49b2fc894490b8d6f930ab5fe35/A of d9f2a49b2fc894490b8d6f930ab5fe35 into ed9e64dc9f2b473089ef8c49e21de0dd(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:18:42,104 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:42,104 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., storeName=d9f2a49b2fc894490b8d6f930ab5fe35/A, priority=13, startTime=1730989121665; duration=0sec 2024-11-07T14:18:42,104 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:42,104 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9f2a49b2fc894490b8d6f930ab5fe35:A 2024-11-07T14:18:42,115 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d9f2a49b2fc894490b8d6f930ab5fe35#C#compaction#263 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:18:42,115 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/aa96a71180a04839936de228e54b3a5c is 50, key is test_row_0/C:col10/1730989119292/Put/seqid=0 2024-11-07T14:18:42,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742133_1309 (size=13289) 2024-11-07T14:18:42,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-07T14:18:42,161 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:42,161 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-07T14:18:42,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:42,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:42,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:42,161 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:42,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:42,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:42,163 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=417 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/e441c7f33f45407c9fcd0de9fd0c0161 2024-11-07T14:18:42,170 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/ea35b7fccc824e29bf39fc59df543caf is 50, key is test_row_0/B:col10/1730989120431/Put/seqid=0 2024-11-07T14:18:42,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742134_1310 (size=12301) 2024-11-07T14:18:42,313 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:42,314 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-07T14:18:42,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:42,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:42,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:42,314 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:42,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:42,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:42,365 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:42,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989182363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:42,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-07T14:18:42,466 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:42,467 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-07T14:18:42,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:42,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:42,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
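The pid=83/pid=84 activity in this stretch is the table flush requested by the test client: the master runs a FlushTableProcedure, repeatedly dispatches the per-region sub-procedure to the region server (which answers "NOT flushing ... as already flushing" and fails with "Unable to complete flush" until the in-progress flush finishes), and the caller keeps polling "Checking to see if procedure is done". As a hedged, minimal sketch of the client side only, such a flush is typically issued through the Admin API as below; the table name matches the log, everything else (configuration, error handling) is illustrative.

// Hedged sketch: requesting a flush of the test table from a client. Assumes a
// standard HBase client configuration on the classpath; not taken from the test code.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table and waits for the
      // operation to complete; retries of the per-region flush are handled on
      // the server side, as the repeated pid=84 attempts in this log show.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}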
2024-11-07T14:18:42,467 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:42,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:42,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:42,525 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/aa96a71180a04839936de228e54b3a5c as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/aa96a71180a04839936de228e54b3a5c 2024-11-07T14:18:42,530 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d9f2a49b2fc894490b8d6f930ab5fe35/C of d9f2a49b2fc894490b8d6f930ab5fe35 into aa96a71180a04839936de228e54b3a5c(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:18:42,530 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:42,530 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., storeName=d9f2a49b2fc894490b8d6f930ab5fe35/C, priority=13, startTime=1730989121665; duration=0sec 2024-11-07T14:18:42,530 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:42,530 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9f2a49b2fc894490b8d6f930ab5fe35:C 2024-11-07T14:18:42,550 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:42,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989182549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:42,551 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:42,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989182549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:42,576 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=417 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/ea35b7fccc824e29bf39fc59df543caf 2024-11-07T14:18:42,583 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/4ba67f0bc24f42158301b3417e4dabbc is 50, key is test_row_0/C:col10/1730989120431/Put/seqid=0 2024-11-07T14:18:42,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742135_1311 (size=12301) 2024-11-07T14:18:42,619 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:42,620 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-07T14:18:42,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:42,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:42,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:42,620 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:42,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:42,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:42,772 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:42,772 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-07T14:18:42,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:42,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:42,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:42,773 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:42,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:42,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:42,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:42,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989182870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:42,925 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:42,925 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-07T14:18:42,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:42,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:42,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:42,926 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:42,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:42,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:42,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-07T14:18:42,988 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=417 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/4ba67f0bc24f42158301b3417e4dabbc 2024-11-07T14:18:42,992 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/e441c7f33f45407c9fcd0de9fd0c0161 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/e441c7f33f45407c9fcd0de9fd0c0161 2024-11-07T14:18:42,995 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/e441c7f33f45407c9fcd0de9fd0c0161, entries=150, sequenceid=417, filesize=12.0 K 2024-11-07T14:18:42,996 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/ea35b7fccc824e29bf39fc59df543caf as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/ea35b7fccc824e29bf39fc59df543caf 2024-11-07T14:18:42,999 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/ea35b7fccc824e29bf39fc59df543caf, entries=150, sequenceid=417, filesize=12.0 K 2024-11-07T14:18:43,000 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/4ba67f0bc24f42158301b3417e4dabbc as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/4ba67f0bc24f42158301b3417e4dabbc 2024-11-07T14:18:43,004 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/4ba67f0bc24f42158301b3417e4dabbc, entries=150, sequenceid=417, filesize=12.0 K 2024-11-07T14:18:43,005 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for d9f2a49b2fc894490b8d6f930ab5fe35 in 1278ms, sequenceid=417, compaction requested=false 2024-11-07T14:18:43,005 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:43,077 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:43,078 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-07T14:18:43,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:43,078 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing d9f2a49b2fc894490b8d6f930ab5fe35 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-07T14:18:43,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=A 2024-11-07T14:18:43,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:43,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=B 2024-11-07T14:18:43,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:43,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=C 2024-11-07T14:18:43,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:43,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/99375b65fde94cc7aa0d257381b707f4 is 50, key is test_row_0/A:col10/1730989121753/Put/seqid=0 2024-11-07T14:18:43,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742136_1312 (size=12301) 2024-11-07T14:18:43,489 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=432 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/99375b65fde94cc7aa0d257381b707f4 2024-11-07T14:18:43,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/a099e0c07f9848afa45bc5739bfec1a6 is 50, key is test_row_0/B:col10/1730989121753/Put/seqid=0 2024-11-07T14:18:43,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742137_1313 (size=12301) 2024-11-07T14:18:43,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:43,880 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:43,902 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=432 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/a099e0c07f9848afa45bc5739bfec1a6 2024-11-07T14:18:43,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/7700c4a1f9654d81a26093074f58d6b3 is 50, key is test_row_0/C:col10/1730989121753/Put/seqid=0 2024-11-07T14:18:43,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742138_1314 (size=12301) 2024-11-07T14:18:43,916 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=432 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/7700c4a1f9654d81a26093074f58d6b3 2024-11-07T14:18:43,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/99375b65fde94cc7aa0d257381b707f4 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/99375b65fde94cc7aa0d257381b707f4 2024-11-07T14:18:43,926 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/99375b65fde94cc7aa0d257381b707f4, entries=150, sequenceid=432, filesize=12.0 K 2024-11-07T14:18:43,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/a099e0c07f9848afa45bc5739bfec1a6 as 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/a099e0c07f9848afa45bc5739bfec1a6 2024-11-07T14:18:43,931 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/a099e0c07f9848afa45bc5739bfec1a6, entries=150, sequenceid=432, filesize=12.0 K 2024-11-07T14:18:43,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/7700c4a1f9654d81a26093074f58d6b3 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/7700c4a1f9654d81a26093074f58d6b3 2024-11-07T14:18:43,936 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/7700c4a1f9654d81a26093074f58d6b3, entries=150, sequenceid=432, filesize=12.0 K 2024-11-07T14:18:43,936 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=134.18 KB/137400 for d9f2a49b2fc894490b8d6f930ab5fe35 in 858ms, sequenceid=432, compaction requested=true 2024-11-07T14:18:43,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:43,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
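[Editor's note] The sequence above is one complete flush: per-family HFiles are written under .tmp/A, .tmp/B and .tmp/C, then committed into the A, B and C store directories (entries=150, sequenceid=432 for each). The writes driving it are single-row puts spanning the three column families (the HFile keys show test_row_0/A:col10 etc., and the client stack traces go through HTable.put from AcidGuaranteesTestTool$AtomicityWriter). A rough client-side sketch of that write shape, with placeholder qualifier and value, not the test tool's actual code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MultiFamilyPutExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                // One row mutated across the three column families A, B and C in a single
                // atomic Put; this is the kind of write that fills the memstores flushed above.
                Put put = new Put(Bytes.toBytes("test_row_0"));
                byte[] qualifier = Bytes.toBytes("col10");
                byte[] value = Bytes.toBytes("value");
                put.addColumn(Bytes.toBytes("A"), qualifier, value);
                put.addColumn(Bytes.toBytes("B"), qualifier, value);
                put.addColumn(Bytes.toBytes("C"), qualifier, value);
                table.put(put);
            }
        }
    }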
2024-11-07T14:18:43,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-11-07T14:18:43,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-11-07T14:18:43,939 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-11-07T14:18:43,939 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0820 sec 2024-11-07T14:18:43,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:43,939 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d9f2a49b2fc894490b8d6f930ab5fe35 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-07T14:18:43,940 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=A 2024-11-07T14:18:43,940 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:43,940 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=B 2024-11-07T14:18:43,940 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:43,940 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=C 2024-11-07T14:18:43,940 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:43,941 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 2.0860 sec 2024-11-07T14:18:43,946 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/a5dc1b5aac744ae091f9212b141904ba is 50, key is test_row_0/A:col10/1730989123926/Put/seqid=0 2024-11-07T14:18:43,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742139_1315 (size=14741) 2024-11-07T14:18:43,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-07T14:18:43,959 INFO [Thread-1032 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-11-07T14:18:43,961 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:18:43,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-11-07T14:18:43,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-07T14:18:43,962 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:18:43,962 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:18:43,962 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:18:43,975 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:43,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989183974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:43,990 DEBUG [Thread-1033 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5886c0f2 to 127.0.0.1:51818 2024-11-07T14:18:43,990 DEBUG [Thread-1033 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:18:43,990 DEBUG [Thread-1041 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x328852db to 127.0.0.1:51818 2024-11-07T14:18:43,990 DEBUG [Thread-1041 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:18:43,991 DEBUG [Thread-1035 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x66e575aa to 127.0.0.1:51818 2024-11-07T14:18:43,991 DEBUG [Thread-1035 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:18:43,992 DEBUG [Thread-1037 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x131ceb8f to 127.0.0.1:51818 
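[Editor's note] The repeated RegionTooBusyException warnings mean the region's memstore has grown past its blocking threshold ("Over memstore limit=512.0 K"), so new mutations are rejected and the client's RpcRetryingCallerImpl retries them (the retry entries below report tries=6, retries=16) until a flush drains the memstore. The threshold is the memstore flush size multiplied by the block multiplier, both configurable. A hedged sketch of the relevant settings follows; the values are illustrative defaults, not the small values this test run evidently uses:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RegionTooBusyTuningExample {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();

            // Client side: number of retries and base pause between attempts when an
            // operation fails with a retriable exception such as RegionTooBusyException.
            conf.setInt("hbase.client.retries.number", 16);
            conf.setLong("hbase.client.pause", 100); // milliseconds

            // Server side: writes are blocked with RegionTooBusyException once a region's
            // memstore exceeds flush.size * block.multiplier (the "Over memstore limit"
            // reported in the log). Illustrative values only.
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        }
    }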
2024-11-07T14:18:43,992 DEBUG [Thread-1037 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:18:43,992 DEBUG [Thread-1039 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5a78bf6d to 127.0.0.1:51818 2024-11-07T14:18:43,992 DEBUG [Thread-1039 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:18:44,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-07T14:18:44,077 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:44,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989184077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:44,113 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:44,113 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-07T14:18:44,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:44,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:44,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:44,114 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:44,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:44,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:44,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-07T14:18:44,266 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:44,266 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-07T14:18:44,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:44,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:44,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:44,266 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:44,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:44,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:44,279 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:44,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989184279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:44,358 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=456 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/a5dc1b5aac744ae091f9212b141904ba 2024-11-07T14:18:44,364 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/cbe15d4a3abd4d3091dafdcb4fffcbc6 is 50, key is test_row_0/B:col10/1730989123926/Put/seqid=0 2024-11-07T14:18:44,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742140_1316 (size=12301) 2024-11-07T14:18:44,418 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:44,419 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-07T14:18:44,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:44,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:44,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:44,419 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:44,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:44,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:44,561 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:44,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45086 deadline: 1730989184561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:44,562 DEBUG [Thread-1028 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4130 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., hostname=69430dbfd73f,45917,1730989044081, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T14:18:44,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-07T14:18:44,564 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:44,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45096 deadline: 1730989184564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:44,565 DEBUG [Thread-1024 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4135 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., hostname=69430dbfd73f,45917,1730989044081, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at 
org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T14:18:44,571 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:44,571 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-07T14:18:44,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:44,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:44,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:44,571 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:44,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:44,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:44,581 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:44,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989184581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:44,608 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:44,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1730989184608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:44,609 DEBUG [Thread-1026 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18204 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., hostname=69430dbfd73f,45917,1730989044081, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T14:18:44,723 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:44,724 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-07T14:18:44,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:44,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:44,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:44,724 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:44,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:44,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:44,768 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=456 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/cbe15d4a3abd4d3091dafdcb4fffcbc6 2024-11-07T14:18:44,774 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/e561d9bb0ea74600a65433cc18da437f is 50, key is test_row_0/C:col10/1730989123926/Put/seqid=0 2024-11-07T14:18:44,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742141_1317 (size=12301) 2024-11-07T14:18:44,876 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:44,876 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-07T14:18:44,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:44,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:44,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:44,877 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:18:44,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:44,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:45,029 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:45,029 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-07T14:18:45,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:45,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:45,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:45,029 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:45,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:45,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:45,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-07T14:18:45,084 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:45,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45108 deadline: 1730989185084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:45,179 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=456 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/e561d9bb0ea74600a65433cc18da437f 2024-11-07T14:18:45,181 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:45,182 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-07T14:18:45,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:45,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:45,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:45,182 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:45,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:45,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:45,183 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/a5dc1b5aac744ae091f9212b141904ba as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/a5dc1b5aac744ae091f9212b141904ba 2024-11-07T14:18:45,186 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/a5dc1b5aac744ae091f9212b141904ba, entries=200, sequenceid=456, filesize=14.4 K 2024-11-07T14:18:45,187 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/cbe15d4a3abd4d3091dafdcb4fffcbc6 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/cbe15d4a3abd4d3091dafdcb4fffcbc6 2024-11-07T14:18:45,190 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/cbe15d4a3abd4d3091dafdcb4fffcbc6, entries=150, sequenceid=456, filesize=12.0 K 2024-11-07T14:18:45,190 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/e561d9bb0ea74600a65433cc18da437f as 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/e561d9bb0ea74600a65433cc18da437f 2024-11-07T14:18:45,193 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/e561d9bb0ea74600a65433cc18da437f, entries=150, sequenceid=456, filesize=12.0 K 2024-11-07T14:18:45,194 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for d9f2a49b2fc894490b8d6f930ab5fe35 in 1255ms, sequenceid=456, compaction requested=true 2024-11-07T14:18:45,194 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:45,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d9f2a49b2fc894490b8d6f930ab5fe35:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:18:45,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:45,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d9f2a49b2fc894490b8d6f930ab5fe35:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:18:45,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:45,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d9f2a49b2fc894490b8d6f930ab5fe35:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:18:45,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:18:45,194 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:18:45,194 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:18:45,195 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50192 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:18:45,195 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52632 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:18:45,196 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): d9f2a49b2fc894490b8d6f930ab5fe35/B is initiating minor compaction (all files) 2024-11-07T14:18:45,196 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): d9f2a49b2fc894490b8d6f930ab5fe35/A is initiating minor compaction (all files) 2024-11-07T14:18:45,196 INFO 
[RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d9f2a49b2fc894490b8d6f930ab5fe35/A in TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:45,196 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d9f2a49b2fc894490b8d6f930ab5fe35/B in TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:45,196 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/b44ca1a8a1c043dbb43ce506f0429360, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/ea35b7fccc824e29bf39fc59df543caf, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/a099e0c07f9848afa45bc5739bfec1a6, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/cbe15d4a3abd4d3091dafdcb4fffcbc6] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp, totalSize=49.0 K 2024-11-07T14:18:45,196 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/ed9e64dc9f2b473089ef8c49e21de0dd, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/e441c7f33f45407c9fcd0de9fd0c0161, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/99375b65fde94cc7aa0d257381b707f4, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/a5dc1b5aac744ae091f9212b141904ba] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp, totalSize=51.4 K 2024-11-07T14:18:45,196 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting b44ca1a8a1c043dbb43ce506f0429360, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1730989119288 2024-11-07T14:18:45,196 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting ed9e64dc9f2b473089ef8c49e21de0dd, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1730989119288 2024-11-07T14:18:45,196 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting ea35b7fccc824e29bf39fc59df543caf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=417, earliestPutTs=1730989120429 2024-11-07T14:18:45,196 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting e441c7f33f45407c9fcd0de9fd0c0161, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, 
seqNum=417, earliestPutTs=1730989120429 2024-11-07T14:18:45,197 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting a099e0c07f9848afa45bc5739bfec1a6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=432, earliestPutTs=1730989121741 2024-11-07T14:18:45,197 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 99375b65fde94cc7aa0d257381b707f4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=432, earliestPutTs=1730989121741 2024-11-07T14:18:45,197 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting cbe15d4a3abd4d3091dafdcb4fffcbc6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=456, earliestPutTs=1730989123926 2024-11-07T14:18:45,197 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting a5dc1b5aac744ae091f9212b141904ba, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=456, earliestPutTs=1730989123926 2024-11-07T14:18:45,205 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d9f2a49b2fc894490b8d6f930ab5fe35#B#compaction#272 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:18:45,205 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/16bf1ac9fe86430988d505dc43212a65 is 50, key is test_row_0/B:col10/1730989123926/Put/seqid=0 2024-11-07T14:18:45,205 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d9f2a49b2fc894490b8d6f930ab5fe35#A#compaction#273 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:18:45,206 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/3db88b4dc37045159ce9dbb1f27624a6 is 50, key is test_row_0/A:col10/1730989123926/Put/seqid=0 2024-11-07T14:18:45,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742142_1318 (size=13425) 2024-11-07T14:18:45,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742143_1319 (size=13425) 2024-11-07T14:18:45,334 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:45,334 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-07T14:18:45,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:45,335 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing d9f2a49b2fc894490b8d6f930ab5fe35 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-07T14:18:45,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=A 2024-11-07T14:18:45,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:45,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=B 2024-11-07T14:18:45,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:45,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=C 2024-11-07T14:18:45,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:45,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/2f8f42d99fba4475a934ea3410b51d56 is 50, key is test_row_0/A:col10/1730989123971/Put/seqid=0 2024-11-07T14:18:45,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742144_1320 (size=12301) 2024-11-07T14:18:45,612 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/3db88b4dc37045159ce9dbb1f27624a6 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/3db88b4dc37045159ce9dbb1f27624a6 2024-11-07T14:18:45,613 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/16bf1ac9fe86430988d505dc43212a65 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/16bf1ac9fe86430988d505dc43212a65 2024-11-07T14:18:45,616 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d9f2a49b2fc894490b8d6f930ab5fe35/B of d9f2a49b2fc894490b8d6f930ab5fe35 into 16bf1ac9fe86430988d505dc43212a65(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:18:45,616 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d9f2a49b2fc894490b8d6f930ab5fe35/A of d9f2a49b2fc894490b8d6f930ab5fe35 into 3db88b4dc37045159ce9dbb1f27624a6(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:18:45,616 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:45,616 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:45,616 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., storeName=d9f2a49b2fc894490b8d6f930ab5fe35/B, priority=12, startTime=1730989125194; duration=0sec 2024-11-07T14:18:45,616 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., storeName=d9f2a49b2fc894490b8d6f930ab5fe35/A, priority=12, startTime=1730989125194; duration=0sec 2024-11-07T14:18:45,617 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:18:45,617 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9f2a49b2fc894490b8d6f930ab5fe35:B 2024-11-07T14:18:45,617 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:18:45,617 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:45,617 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9f2a49b2fc894490b8d6f930ab5fe35:A 2024-11-07T14:18:45,618 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50192 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:18:45,618 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): d9f2a49b2fc894490b8d6f930ab5fe35/C is initiating minor compaction (all files) 2024-11-07T14:18:45,618 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d9f2a49b2fc894490b8d6f930ab5fe35/C in TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:45,618 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/aa96a71180a04839936de228e54b3a5c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/4ba67f0bc24f42158301b3417e4dabbc, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/7700c4a1f9654d81a26093074f58d6b3, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/e561d9bb0ea74600a65433cc18da437f] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp, totalSize=49.0 K 2024-11-07T14:18:45,618 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting aa96a71180a04839936de228e54b3a5c, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1730989119288 2024-11-07T14:18:45,618 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ba67f0bc24f42158301b3417e4dabbc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=417, earliestPutTs=1730989120429 2024-11-07T14:18:45,619 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 7700c4a1f9654d81a26093074f58d6b3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=432, earliestPutTs=1730989121741 2024-11-07T14:18:45,619 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting e561d9bb0ea74600a65433cc18da437f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=456, earliestPutTs=1730989123926 2024-11-07T14:18:45,625 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d9f2a49b2fc894490b8d6f930ab5fe35#C#compaction#275 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:18:45,626 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/dff18578f3544e87b5c42fb2623bd9ec is 50, key is test_row_0/C:col10/1730989123926/Put/seqid=0 2024-11-07T14:18:45,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742145_1321 (size=13425) 2024-11-07T14:18:45,744 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=468 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/2f8f42d99fba4475a934ea3410b51d56 2024-11-07T14:18:45,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/aadf1c1739934dcfb7775131660be7c9 is 50, key is test_row_0/B:col10/1730989123971/Put/seqid=0 2024-11-07T14:18:45,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742146_1322 (size=12301) 2024-11-07T14:18:46,033 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/dff18578f3544e87b5c42fb2623bd9ec as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/dff18578f3544e87b5c42fb2623bd9ec 2024-11-07T14:18:46,037 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d9f2a49b2fc894490b8d6f930ab5fe35/C of d9f2a49b2fc894490b8d6f930ab5fe35 into dff18578f3544e87b5c42fb2623bd9ec(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:18:46,037 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:46,037 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35., storeName=d9f2a49b2fc894490b8d6f930ab5fe35/C, priority=12, startTime=1730989125194; duration=0sec 2024-11-07T14:18:46,037 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:18:46,037 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9f2a49b2fc894490b8d6f930ab5fe35:C 2024-11-07T14:18:46,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-07T14:18:46,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:46,090 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. as already flushing 2024-11-07T14:18:46,090 DEBUG [Thread-1030 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x17cf7fc0 to 127.0.0.1:51818 2024-11-07T14:18:46,090 DEBUG [Thread-1030 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:18:46,154 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=468 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/aadf1c1739934dcfb7775131660be7c9 2024-11-07T14:18:46,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/e28bf66774af45809fdfa744310607e2 is 50, key is test_row_0/C:col10/1730989123971/Put/seqid=0 2024-11-07T14:18:46,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742147_1323 (size=12301) 2024-11-07T14:18:46,565 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=468 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/e28bf66774af45809fdfa744310607e2 2024-11-07T14:18:46,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/2f8f42d99fba4475a934ea3410b51d56 as 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/2f8f42d99fba4475a934ea3410b51d56 2024-11-07T14:18:46,574 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/2f8f42d99fba4475a934ea3410b51d56, entries=150, sequenceid=468, filesize=12.0 K 2024-11-07T14:18:46,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/aadf1c1739934dcfb7775131660be7c9 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/aadf1c1739934dcfb7775131660be7c9 2024-11-07T14:18:46,578 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/aadf1c1739934dcfb7775131660be7c9, entries=150, sequenceid=468, filesize=12.0 K 2024-11-07T14:18:46,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/e28bf66774af45809fdfa744310607e2 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/e28bf66774af45809fdfa744310607e2 2024-11-07T14:18:46,583 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/e28bf66774af45809fdfa744310607e2, entries=150, sequenceid=468, filesize=12.0 K 2024-11-07T14:18:46,583 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=6.71 KB/6870 for d9f2a49b2fc894490b8d6f930ab5fe35 in 1248ms, sequenceid=468, compaction requested=false 2024-11-07T14:18:46,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:46,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:46,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-11-07T14:18:46,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-11-07T14:18:46,586 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-11-07T14:18:46,586 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6230 sec 2024-11-07T14:18:46,587 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 2.6250 sec 2024-11-07T14:18:47,850 DEBUG [Thread-1022 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x669e1999 to 127.0.0.1:51818 2024-11-07T14:18:47,851 DEBUG [Thread-1022 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:18:48,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-07T14:18:48,067 INFO [Thread-1032 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-11-07T14:18:48,603 DEBUG [Thread-1028 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4dfb20f6 to 127.0.0.1:51818 2024-11-07T14:18:48,603 DEBUG [Thread-1028 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:18:48,605 DEBUG [Thread-1024 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72aa9ee5 to 127.0.0.1:51818 2024-11-07T14:18:48,605 DEBUG [Thread-1024 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:18:52,404 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-07T14:18:54,641 DEBUG [Thread-1026 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4ec09297 to 127.0.0.1:51818 2024-11-07T14:18:54,641 DEBUG [Thread-1026 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:18:54,641 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-07T14:18:54,641 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 83 2024-11-07T14:18:54,641 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 86 2024-11-07T14:18:54,641 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 24 2024-11-07T14:18:54,641 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 77 2024-11-07T14:18:54,641 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 100 2024-11-07T14:18:54,641 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-07T14:18:54,641 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7643 2024-11-07T14:18:54,641 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7259 2024-11-07T14:18:54,641 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7575 2024-11-07T14:18:54,641 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7641 2024-11-07T14:18:54,641 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7314 2024-11-07T14:18:54,641 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-07T14:18:54,641 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-07T14:18:54,641 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x17b6adc5 to 127.0.0.1:51818 2024-11-07T14:18:54,642 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:18:54,642 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-07T14:18:54,642 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-07T14:18:54,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-07T14:18:54,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-07T14:18:54,645 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730989134645"}]},"ts":"1730989134645"} 2024-11-07T14:18:54,646 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-07T14:18:54,648 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-07T14:18:54,648 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-07T14:18:54,649 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d9f2a49b2fc894490b8d6f930ab5fe35, UNASSIGN}] 2024-11-07T14:18:54,650 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d9f2a49b2fc894490b8d6f930ab5fe35, UNASSIGN 2024-11-07T14:18:54,650 INFO [PEWorker-5 {}] 
assignment.RegionStateStore(202): pid=89 updating hbase:meta row=d9f2a49b2fc894490b8d6f930ab5fe35, regionState=CLOSING, regionLocation=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:54,651 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-07T14:18:54,651 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; CloseRegionProcedure d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081}] 2024-11-07T14:18:54,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-07T14:18:54,802 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:54,803 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] handler.UnassignRegionHandler(124): Close d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:54,803 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-07T14:18:54,803 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1681): Closing d9f2a49b2fc894490b8d6f930ab5fe35, disabling compactions & flushes 2024-11-07T14:18:54,803 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:54,803 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 2024-11-07T14:18:54,803 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. after waiting 0 ms 2024-11-07T14:18:54,803 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
2024-11-07T14:18:54,803 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(2837): Flushing d9f2a49b2fc894490b8d6f930ab5fe35 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-07T14:18:54,803 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=A 2024-11-07T14:18:54,803 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:54,803 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=B 2024-11-07T14:18:54,803 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:54,803 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d9f2a49b2fc894490b8d6f930ab5fe35, store=C 2024-11-07T14:18:54,803 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:54,807 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/e30b85d657d7472ba25dec5df33d6add is 50, key is test_row_0/A:col10/1730989127849/Put/seqid=0 2024-11-07T14:18:54,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742148_1324 (size=12301) 2024-11-07T14:18:54,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-07T14:18:55,211 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=479 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/e30b85d657d7472ba25dec5df33d6add 2024-11-07T14:18:55,218 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/e811f422fc524766bf5c193a4a1bf6c3 is 50, key is test_row_0/B:col10/1730989127849/Put/seqid=0 2024-11-07T14:18:55,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742149_1325 (size=12301) 2024-11-07T14:18:55,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-07T14:18:55,622 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 
{event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=479 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/e811f422fc524766bf5c193a4a1bf6c3 2024-11-07T14:18:55,628 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/5b540abe3c114ac29327536381250a3f is 50, key is test_row_0/C:col10/1730989127849/Put/seqid=0 2024-11-07T14:18:55,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742150_1326 (size=12301) 2024-11-07T14:18:55,632 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=479 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/5b540abe3c114ac29327536381250a3f 2024-11-07T14:18:55,635 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/A/e30b85d657d7472ba25dec5df33d6add as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/e30b85d657d7472ba25dec5df33d6add 2024-11-07T14:18:55,638 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/e30b85d657d7472ba25dec5df33d6add, entries=150, sequenceid=479, filesize=12.0 K 2024-11-07T14:18:55,639 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/B/e811f422fc524766bf5c193a4a1bf6c3 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/e811f422fc524766bf5c193a4a1bf6c3 2024-11-07T14:18:55,641 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/e811f422fc524766bf5c193a4a1bf6c3, entries=150, sequenceid=479, filesize=12.0 K 2024-11-07T14:18:55,642 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/.tmp/C/5b540abe3c114ac29327536381250a3f as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/5b540abe3c114ac29327536381250a3f 2024-11-07T14:18:55,645 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/5b540abe3c114ac29327536381250a3f, entries=150, sequenceid=479, filesize=12.0 K 2024-11-07T14:18:55,645 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for d9f2a49b2fc894490b8d6f930ab5fe35 in 842ms, sequenceid=479, compaction requested=true 2024-11-07T14:18:55,646 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/304f6021787045179c92ebc2db7e04de, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/be8fc892d67f45fba9abcf68af683d78, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/74a57279b9c24e288d4a81dba830a5f8, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/b8742b2b7f2a4914b7296bd7dd0d302f, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/4a43549d252c42f5b3085c953b01daec, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/51b4a1a37a4c40dba8619465cad233d8, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/a48d0b1eadd0442a97aaaabc65d44501, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/d2b88d00580b48df8c0373ccf2766ddc, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/c01cb5d57f0b410ea8fa3120adcf2d68, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/a56191bc5ff9445faf5d158a278d4f87, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/1edfd2ff2e374865aa39851f263e53de, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/6af6507f90b04b8cbe0cfc06d3c2183e, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/e5fcbb9c729f46049d8bb599e9d5f5a4, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/a883341030a348ab93a713dcf206ab17, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/85212e46c26c48bbab43d0fb6e7708eb, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/0ccbb08c9b2646d0b07dc4ce02d8c514, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/66c547102027408fb9e21a9e9a4ee41f, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/51d4a2dd6dd947ef9b8041c875ac79ff, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/addc0c94fb0e426c8bf6fb91f1fb13d6, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/a52ce54bd8fd40dea960152356261193, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/4acb2b1802ab4d488af9f5cdcbada4f7, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/d4f37ae98a594319affcefce34bd118b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/bc0eceb315ad4c6d953785bbfdea8711, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/498224edbdb04fef8b6148366b2513b4, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/f2006c5494d3476fb876bf2e20d672bf, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/c1beaeec70a04672beadf2fca2404f1d, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/74d649eb41244e81bd3ba12c72054a3d, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/67cf5ff9aea64a99812d427bb44ffa65, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/ed9e64dc9f2b473089ef8c49e21de0dd, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/824beedefa954821bd82124e0a1ab544, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/e441c7f33f45407c9fcd0de9fd0c0161, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/99375b65fde94cc7aa0d257381b707f4, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/a5dc1b5aac744ae091f9212b141904ba] to archive 2024-11-07T14:18:55,647 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-07T14:18:55,648 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/304f6021787045179c92ebc2db7e04de to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/304f6021787045179c92ebc2db7e04de 2024-11-07T14:18:55,649 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/be8fc892d67f45fba9abcf68af683d78 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/be8fc892d67f45fba9abcf68af683d78 2024-11-07T14:18:55,650 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/74a57279b9c24e288d4a81dba830a5f8 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/74a57279b9c24e288d4a81dba830a5f8 2024-11-07T14:18:55,651 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/b8742b2b7f2a4914b7296bd7dd0d302f to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/b8742b2b7f2a4914b7296bd7dd0d302f 2024-11-07T14:18:55,652 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/4a43549d252c42f5b3085c953b01daec to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/4a43549d252c42f5b3085c953b01daec 2024-11-07T14:18:55,653 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/51b4a1a37a4c40dba8619465cad233d8 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/51b4a1a37a4c40dba8619465cad233d8 2024-11-07T14:18:55,654 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/a48d0b1eadd0442a97aaaabc65d44501 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/a48d0b1eadd0442a97aaaabc65d44501 2024-11-07T14:18:55,655 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/d2b88d00580b48df8c0373ccf2766ddc to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/d2b88d00580b48df8c0373ccf2766ddc 2024-11-07T14:18:55,655 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/c01cb5d57f0b410ea8fa3120adcf2d68 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/c01cb5d57f0b410ea8fa3120adcf2d68 2024-11-07T14:18:55,656 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/a56191bc5ff9445faf5d158a278d4f87 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/a56191bc5ff9445faf5d158a278d4f87 2024-11-07T14:18:55,657 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/1edfd2ff2e374865aa39851f263e53de to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/1edfd2ff2e374865aa39851f263e53de 2024-11-07T14:18:55,658 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/6af6507f90b04b8cbe0cfc06d3c2183e to 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/6af6507f90b04b8cbe0cfc06d3c2183e 2024-11-07T14:18:55,659 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/e5fcbb9c729f46049d8bb599e9d5f5a4 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/e5fcbb9c729f46049d8bb599e9d5f5a4 2024-11-07T14:18:55,660 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/a883341030a348ab93a713dcf206ab17 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/a883341030a348ab93a713dcf206ab17 2024-11-07T14:18:55,661 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/85212e46c26c48bbab43d0fb6e7708eb to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/85212e46c26c48bbab43d0fb6e7708eb 2024-11-07T14:18:55,662 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/0ccbb08c9b2646d0b07dc4ce02d8c514 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/0ccbb08c9b2646d0b07dc4ce02d8c514 2024-11-07T14:18:55,663 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/66c547102027408fb9e21a9e9a4ee41f to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/66c547102027408fb9e21a9e9a4ee41f 2024-11-07T14:18:55,664 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/51d4a2dd6dd947ef9b8041c875ac79ff to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/51d4a2dd6dd947ef9b8041c875ac79ff 2024-11-07T14:18:55,665 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/addc0c94fb0e426c8bf6fb91f1fb13d6 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/addc0c94fb0e426c8bf6fb91f1fb13d6 2024-11-07T14:18:55,666 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/a52ce54bd8fd40dea960152356261193 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/a52ce54bd8fd40dea960152356261193 2024-11-07T14:18:55,666 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/4acb2b1802ab4d488af9f5cdcbada4f7 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/4acb2b1802ab4d488af9f5cdcbada4f7 2024-11-07T14:18:55,667 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/d4f37ae98a594319affcefce34bd118b to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/d4f37ae98a594319affcefce34bd118b 2024-11-07T14:18:55,668 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/bc0eceb315ad4c6d953785bbfdea8711 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/bc0eceb315ad4c6d953785bbfdea8711 2024-11-07T14:18:55,669 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/498224edbdb04fef8b6148366b2513b4 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/498224edbdb04fef8b6148366b2513b4 2024-11-07T14:18:55,670 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/f2006c5494d3476fb876bf2e20d672bf to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/f2006c5494d3476fb876bf2e20d672bf 2024-11-07T14:18:55,671 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/c1beaeec70a04672beadf2fca2404f1d to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/c1beaeec70a04672beadf2fca2404f1d 2024-11-07T14:18:55,672 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/74d649eb41244e81bd3ba12c72054a3d to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/74d649eb41244e81bd3ba12c72054a3d 2024-11-07T14:18:55,673 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/67cf5ff9aea64a99812d427bb44ffa65 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/67cf5ff9aea64a99812d427bb44ffa65 2024-11-07T14:18:55,674 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/ed9e64dc9f2b473089ef8c49e21de0dd to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/ed9e64dc9f2b473089ef8c49e21de0dd 2024-11-07T14:18:55,675 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/824beedefa954821bd82124e0a1ab544 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/824beedefa954821bd82124e0a1ab544 2024-11-07T14:18:55,675 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/e441c7f33f45407c9fcd0de9fd0c0161 to 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/e441c7f33f45407c9fcd0de9fd0c0161 2024-11-07T14:18:55,676 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/99375b65fde94cc7aa0d257381b707f4 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/99375b65fde94cc7aa0d257381b707f4 2024-11-07T14:18:55,677 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/a5dc1b5aac744ae091f9212b141904ba to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/a5dc1b5aac744ae091f9212b141904ba 2024-11-07T14:18:55,678 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/7c74afb4edd84d4c90df411a08053569, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/2f034c1fa4ff43a59ec29d768ef0471c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/13219b548b584a50bb1ea1bc78147855, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/d6e5a0bf22234bdf9f3dddefe3c0688e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/590005240ae5425d800c86abfb43e7ff, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/6fe97d0635fc40fe888156aaebaab153, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/aebf96503800437fbc3041d4246374cf, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/8696a14836da426cbd7f5fb22034882d, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/858266c9a3dd4e0ab705b10c7da7c604, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/259bb8dcd8e14468a5a70e3fda02839a, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/8cd733df857043a5a2154c2b182687da, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/07887b5f20cd4e9e9dfb0cc0893f625c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/e434ba63564e489fa98cab6aaf43ff0d, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/fe2a4fcaa8a3420c8f76db202072401e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/3957a2914fa14932b5dbdfd2f8b9b2f5, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/eeefa918b0d14ac980c1eaee82da307b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/e83fa3fe418f425a8016d7b03b25c79f, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/183d9825e8bb4dc39bbb15180410b5dd, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/8fb878b2a74e482e8a78835f28603f2d, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/d2fff3ad2eb545c5a3945c53df908a32, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/c396c46567c2474fbcfa0ff6207c7270, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/9f42a716392c4c5fbb55c86d0fe7850f, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/a09e66f8ed704b0086d1385444ee0c6e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/f124a7538e2d40dfb95d46d841227051, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/efea7677657b4c8084e5be44c2c80251, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/1f80a702abf74733bd50c1da8cf90749, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/4ee1b6b63c1246299b5eb25366c05a35, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/0b6649e92315431bb54c697ff4ee727d, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/b44ca1a8a1c043dbb43ce506f0429360, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/c8f5c50e1aed4091920dbeba11e35929, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/ea35b7fccc824e29bf39fc59df543caf, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/a099e0c07f9848afa45bc5739bfec1a6, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/cbe15d4a3abd4d3091dafdcb4fffcbc6] to archive 2024-11-07T14:18:55,679 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-07T14:18:55,680 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/7c74afb4edd84d4c90df411a08053569 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/7c74afb4edd84d4c90df411a08053569 2024-11-07T14:18:55,681 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/2f034c1fa4ff43a59ec29d768ef0471c to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/2f034c1fa4ff43a59ec29d768ef0471c 2024-11-07T14:18:55,682 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/13219b548b584a50bb1ea1bc78147855 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/13219b548b584a50bb1ea1bc78147855 2024-11-07T14:18:55,683 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/d6e5a0bf22234bdf9f3dddefe3c0688e to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/d6e5a0bf22234bdf9f3dddefe3c0688e 2024-11-07T14:18:55,684 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/590005240ae5425d800c86abfb43e7ff to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/590005240ae5425d800c86abfb43e7ff 2024-11-07T14:18:55,685 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/6fe97d0635fc40fe888156aaebaab153 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/6fe97d0635fc40fe888156aaebaab153 2024-11-07T14:18:55,686 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/aebf96503800437fbc3041d4246374cf to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/aebf96503800437fbc3041d4246374cf 2024-11-07T14:18:55,686 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/8696a14836da426cbd7f5fb22034882d to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/8696a14836da426cbd7f5fb22034882d 2024-11-07T14:18:55,687 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/858266c9a3dd4e0ab705b10c7da7c604 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/858266c9a3dd4e0ab705b10c7da7c604 2024-11-07T14:18:55,688 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/259bb8dcd8e14468a5a70e3fda02839a to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/259bb8dcd8e14468a5a70e3fda02839a 2024-11-07T14:18:55,689 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/8cd733df857043a5a2154c2b182687da to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/8cd733df857043a5a2154c2b182687da 2024-11-07T14:18:55,690 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/07887b5f20cd4e9e9dfb0cc0893f625c to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/07887b5f20cd4e9e9dfb0cc0893f625c 2024-11-07T14:18:55,691 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/e434ba63564e489fa98cab6aaf43ff0d to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/e434ba63564e489fa98cab6aaf43ff0d 2024-11-07T14:18:55,692 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/fe2a4fcaa8a3420c8f76db202072401e to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/fe2a4fcaa8a3420c8f76db202072401e 2024-11-07T14:18:55,693 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/3957a2914fa14932b5dbdfd2f8b9b2f5 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/3957a2914fa14932b5dbdfd2f8b9b2f5 2024-11-07T14:18:55,694 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/eeefa918b0d14ac980c1eaee82da307b to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/eeefa918b0d14ac980c1eaee82da307b 2024-11-07T14:18:55,694 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/e83fa3fe418f425a8016d7b03b25c79f to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/e83fa3fe418f425a8016d7b03b25c79f 2024-11-07T14:18:55,695 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/183d9825e8bb4dc39bbb15180410b5dd to 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/183d9825e8bb4dc39bbb15180410b5dd 2024-11-07T14:18:55,696 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/8fb878b2a74e482e8a78835f28603f2d to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/8fb878b2a74e482e8a78835f28603f2d 2024-11-07T14:18:55,697 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/d2fff3ad2eb545c5a3945c53df908a32 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/d2fff3ad2eb545c5a3945c53df908a32 2024-11-07T14:18:55,698 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/c396c46567c2474fbcfa0ff6207c7270 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/c396c46567c2474fbcfa0ff6207c7270 2024-11-07T14:18:55,699 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/9f42a716392c4c5fbb55c86d0fe7850f to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/9f42a716392c4c5fbb55c86d0fe7850f 2024-11-07T14:18:55,700 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/a09e66f8ed704b0086d1385444ee0c6e to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/a09e66f8ed704b0086d1385444ee0c6e 2024-11-07T14:18:55,700 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/f124a7538e2d40dfb95d46d841227051 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/f124a7538e2d40dfb95d46d841227051 2024-11-07T14:18:55,701 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/efea7677657b4c8084e5be44c2c80251 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/efea7677657b4c8084e5be44c2c80251 2024-11-07T14:18:55,702 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/1f80a702abf74733bd50c1da8cf90749 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/1f80a702abf74733bd50c1da8cf90749 2024-11-07T14:18:55,703 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/4ee1b6b63c1246299b5eb25366c05a35 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/4ee1b6b63c1246299b5eb25366c05a35 2024-11-07T14:18:55,704 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/0b6649e92315431bb54c697ff4ee727d to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/0b6649e92315431bb54c697ff4ee727d 2024-11-07T14:18:55,705 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/b44ca1a8a1c043dbb43ce506f0429360 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/b44ca1a8a1c043dbb43ce506f0429360 2024-11-07T14:18:55,706 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/c8f5c50e1aed4091920dbeba11e35929 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/c8f5c50e1aed4091920dbeba11e35929 2024-11-07T14:18:55,707 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/ea35b7fccc824e29bf39fc59df543caf to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/ea35b7fccc824e29bf39fc59df543caf 2024-11-07T14:18:55,708 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/a099e0c07f9848afa45bc5739bfec1a6 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/a099e0c07f9848afa45bc5739bfec1a6 2024-11-07T14:18:55,709 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/cbe15d4a3abd4d3091dafdcb4fffcbc6 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/cbe15d4a3abd4d3091dafdcb4fffcbc6 2024-11-07T14:18:55,710 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/3c695a677c664af5aa963fd3a3704cea, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/65febfbe71d4483694eae513c5cfb4db, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/0b49ca003dd247ddb641449a7001fd7b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/106b09c3c2d64841a84f1aeb3d26faf6, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/337c34a98cff41cb82dadbbd1c127873, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/54b936af5e384840a3ebfe90b6019626, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/2aea7f5bbe43472fbafd5427e48742dc, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/bce9128814404198b8116ac7049455eb, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/320c5e2a782140fe88b537b7d7ee1508, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/5841af2d948d461ea71c77485f626650, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/17b2c2834dde4de6a04798c09a859e8d, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/ef4ba4bb0adb413483e8f60580597b5c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/e556bcf1dcef4bacba05a9b75c144f97, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/c5406b16bfc4461584876b824c3d01ed, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/dc070d9c85ff423d9ab7eca23a1d5e1f, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/fd903b4b12144a6cb81c7fa220f1d2df, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/cdbaf66d99d14aed972e02f6ec57214c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/51c7e07d55814aff87b5c32872d835b8, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/2babf5ea0d7c42aa96ebdec0e6ee01a8, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/57cebe1a35c844239dc165cea5388bc1, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/1ca8586ecc544d9fa9eee8ca18646b06, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/0a8a93f9f32246f4bff213e9b983e618, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/576e9078dd8146118ed21f27949e71bf, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/f0b51ab933ec4854a36f00bc97baaed1, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/d44392e9b3684629b01f9550111c1126, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/15b14a8e95b54b06a38b57dd7927207a, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/c39a9997f04b4f0ba1ecc1c2af0bf70e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/0207402a6a7449759f739876a539d773, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/aa96a71180a04839936de228e54b3a5c, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/f233faac114b4e75848c35bd90236bcb, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/4ba67f0bc24f42158301b3417e4dabbc, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/7700c4a1f9654d81a26093074f58d6b3, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/e561d9bb0ea74600a65433cc18da437f] to archive 2024-11-07T14:18:55,711 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-07T14:18:55,712 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/3c695a677c664af5aa963fd3a3704cea to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/3c695a677c664af5aa963fd3a3704cea 2024-11-07T14:18:55,713 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/65febfbe71d4483694eae513c5cfb4db to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/65febfbe71d4483694eae513c5cfb4db 2024-11-07T14:18:55,714 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/0b49ca003dd247ddb641449a7001fd7b to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/0b49ca003dd247ddb641449a7001fd7b 2024-11-07T14:18:55,715 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/106b09c3c2d64841a84f1aeb3d26faf6 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/106b09c3c2d64841a84f1aeb3d26faf6 2024-11-07T14:18:55,715 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/337c34a98cff41cb82dadbbd1c127873 to 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/337c34a98cff41cb82dadbbd1c127873 2024-11-07T14:18:55,716 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/54b936af5e384840a3ebfe90b6019626 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/54b936af5e384840a3ebfe90b6019626 2024-11-07T14:18:55,717 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/2aea7f5bbe43472fbafd5427e48742dc to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/2aea7f5bbe43472fbafd5427e48742dc 2024-11-07T14:18:55,718 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/bce9128814404198b8116ac7049455eb to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/bce9128814404198b8116ac7049455eb 2024-11-07T14:18:55,719 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/320c5e2a782140fe88b537b7d7ee1508 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/320c5e2a782140fe88b537b7d7ee1508 2024-11-07T14:18:55,720 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/5841af2d948d461ea71c77485f626650 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/5841af2d948d461ea71c77485f626650 2024-11-07T14:18:55,721 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/17b2c2834dde4de6a04798c09a859e8d to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/17b2c2834dde4de6a04798c09a859e8d 2024-11-07T14:18:55,722 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/ef4ba4bb0adb413483e8f60580597b5c to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/ef4ba4bb0adb413483e8f60580597b5c 2024-11-07T14:18:55,723 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/e556bcf1dcef4bacba05a9b75c144f97 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/e556bcf1dcef4bacba05a9b75c144f97 2024-11-07T14:18:55,724 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/c5406b16bfc4461584876b824c3d01ed to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/c5406b16bfc4461584876b824c3d01ed 2024-11-07T14:18:55,725 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/dc070d9c85ff423d9ab7eca23a1d5e1f to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/dc070d9c85ff423d9ab7eca23a1d5e1f 2024-11-07T14:18:55,726 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/fd903b4b12144a6cb81c7fa220f1d2df to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/fd903b4b12144a6cb81c7fa220f1d2df 2024-11-07T14:18:55,727 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/cdbaf66d99d14aed972e02f6ec57214c to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/cdbaf66d99d14aed972e02f6ec57214c 2024-11-07T14:18:55,728 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/51c7e07d55814aff87b5c32872d835b8 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/51c7e07d55814aff87b5c32872d835b8 2024-11-07T14:18:55,729 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/2babf5ea0d7c42aa96ebdec0e6ee01a8 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/2babf5ea0d7c42aa96ebdec0e6ee01a8 2024-11-07T14:18:55,730 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/57cebe1a35c844239dc165cea5388bc1 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/57cebe1a35c844239dc165cea5388bc1 2024-11-07T14:18:55,730 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/1ca8586ecc544d9fa9eee8ca18646b06 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/1ca8586ecc544d9fa9eee8ca18646b06 2024-11-07T14:18:55,731 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/0a8a93f9f32246f4bff213e9b983e618 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/0a8a93f9f32246f4bff213e9b983e618 2024-11-07T14:18:55,732 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/576e9078dd8146118ed21f27949e71bf to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/576e9078dd8146118ed21f27949e71bf 2024-11-07T14:18:55,733 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/f0b51ab933ec4854a36f00bc97baaed1 to 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/f0b51ab933ec4854a36f00bc97baaed1 2024-11-07T14:18:55,734 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/d44392e9b3684629b01f9550111c1126 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/d44392e9b3684629b01f9550111c1126 2024-11-07T14:18:55,735 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/15b14a8e95b54b06a38b57dd7927207a to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/15b14a8e95b54b06a38b57dd7927207a 2024-11-07T14:18:55,736 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/c39a9997f04b4f0ba1ecc1c2af0bf70e to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/c39a9997f04b4f0ba1ecc1c2af0bf70e 2024-11-07T14:18:55,737 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/0207402a6a7449759f739876a539d773 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/0207402a6a7449759f739876a539d773 2024-11-07T14:18:55,738 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/aa96a71180a04839936de228e54b3a5c to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/aa96a71180a04839936de228e54b3a5c 2024-11-07T14:18:55,739 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/f233faac114b4e75848c35bd90236bcb to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/f233faac114b4e75848c35bd90236bcb 2024-11-07T14:18:55,739 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/4ba67f0bc24f42158301b3417e4dabbc to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/4ba67f0bc24f42158301b3417e4dabbc 2024-11-07T14:18:55,740 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/7700c4a1f9654d81a26093074f58d6b3 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/7700c4a1f9654d81a26093074f58d6b3 2024-11-07T14:18:55,741 DEBUG [StoreCloser-TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/e561d9bb0ea74600a65433cc18da437f to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/e561d9bb0ea74600a65433cc18da437f 2024-11-07T14:18:55,745 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/recovered.edits/482.seqid, newMaxSeqId=482, maxSeqId=1 2024-11-07T14:18:55,746 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35. 
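
Each "Archived from FileableStoreFile" entry above is the same transformation: on store close the HFile keeps its data/<namespace>/<table>/<region>/<family>/<hfile> layout but is re-rooted under archive/. A minimal sketch of that path mapping (class and method names here are invented for illustration; this is not the backup.HFileArchiver code):

    import org.apache.hadoop.fs.Path;

    // Illustrative only: mirror a store file from the data/ tree into the archive/ tree,
    // matching the source -> destination pairs logged by backup.HFileArchiver above.
    final class ArchivePathSketch {
      static Path sourcePath(Path rootDir, String ns, String table, String region, String cf, String file) {
        return new Path(rootDir, "data/" + ns + "/" + table + "/" + region + "/" + cf + "/" + file);
      }
      static Path archivePath(Path rootDir, String ns, String table, String region, String cf, String file) {
        // Same relative layout, re-rooted under archive/data/...
        return new Path(rootDir, "archive/data/" + ns + "/" + table + "/" + region + "/" + cf + "/" + file);
      }
    }
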
2024-11-07T14:18:55,746 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1635): Region close journal for d9f2a49b2fc894490b8d6f930ab5fe35: 2024-11-07T14:18:55,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-07T14:18:55,747 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] handler.UnassignRegionHandler(170): Closed d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:55,748 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=89 updating hbase:meta row=d9f2a49b2fc894490b8d6f930ab5fe35, regionState=CLOSED 2024-11-07T14:18:55,750 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-11-07T14:18:55,750 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; CloseRegionProcedure d9f2a49b2fc894490b8d6f930ab5fe35, server=69430dbfd73f,45917,1730989044081 in 1.0970 sec 2024-11-07T14:18:55,751 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=89, resume processing ppid=88 2024-11-07T14:18:55,751 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, ppid=88, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d9f2a49b2fc894490b8d6f930ab5fe35, UNASSIGN in 1.1010 sec 2024-11-07T14:18:55,752 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-11-07T14:18:55,752 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.1030 sec 2024-11-07T14:18:55,753 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730989135753"}]},"ts":"1730989135753"} 2024-11-07T14:18:55,754 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-07T14:18:55,756 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-07T14:18:55,757 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.1140 sec 2024-11-07T14:18:56,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-07T14:18:56,748 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-11-07T14:18:56,748 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-07T14:18:56,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:18:56,750 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=91, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:18:56,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): 
Checking to see if procedure is done pid=91 2024-11-07T14:18:56,751 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=91, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:18:56,753 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:56,755 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A, FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B, FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C, FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/recovered.edits] 2024-11-07T14:18:56,758 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/2f8f42d99fba4475a934ea3410b51d56 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/2f8f42d99fba4475a934ea3410b51d56 2024-11-07T14:18:56,759 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/3db88b4dc37045159ce9dbb1f27624a6 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/3db88b4dc37045159ce9dbb1f27624a6 2024-11-07T14:18:56,760 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/e30b85d657d7472ba25dec5df33d6add to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/A/e30b85d657d7472ba25dec5df33d6add 2024-11-07T14:18:56,762 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/16bf1ac9fe86430988d505dc43212a65 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/16bf1ac9fe86430988d505dc43212a65 2024-11-07T14:18:56,763 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/aadf1c1739934dcfb7775131660be7c9 to 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/aadf1c1739934dcfb7775131660be7c9 2024-11-07T14:18:56,764 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/e811f422fc524766bf5c193a4a1bf6c3 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/B/e811f422fc524766bf5c193a4a1bf6c3 2024-11-07T14:18:56,766 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/5b540abe3c114ac29327536381250a3f to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/5b540abe3c114ac29327536381250a3f 2024-11-07T14:18:56,767 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/dff18578f3544e87b5c42fb2623bd9ec to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/dff18578f3544e87b5c42fb2623bd9ec 2024-11-07T14:18:56,768 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/e28bf66774af45809fdfa744310607e2 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/C/e28bf66774af45809fdfa744310607e2 2024-11-07T14:18:56,771 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/recovered.edits/482.seqid to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35/recovered.edits/482.seqid 2024-11-07T14:18:56,771 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/d9f2a49b2fc894490b8d6f930ab5fe35 2024-11-07T14:18:56,771 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-07T14:18:56,773 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=91, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:18:56,777 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-07T14:18:56,778 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 
2024-11-07T14:18:56,779 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=91, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:18:56,779 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-07T14:18:56,780 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1730989136779"}]},"ts":"9223372036854775807"} 2024-11-07T14:18:56,781 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-07T14:18:56,781 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => d9f2a49b2fc894490b8d6f930ab5fe35, NAME => 'TestAcidGuarantees,,1730989101824.d9f2a49b2fc894490b8d6f930ab5fe35.', STARTKEY => '', ENDKEY => ''}] 2024-11-07T14:18:56,781 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-07T14:18:56,781 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1730989136781"}]},"ts":"9223372036854775807"} 2024-11-07T14:18:56,782 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-07T14:18:56,785 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=91, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:18:56,785 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 36 msec 2024-11-07T14:18:56,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-07T14:18:56,852 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 91 completed 2024-11-07T14:18:56,863 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testGetAtomicity Thread=236 (was 245), OpenFileDescriptor=445 (was 462), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=364 (was 433), ProcessCount=11 (was 11), AvailableMemoryMB=5831 (was 6023) 2024-11-07T14:18:56,874 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobScanAtomicity Thread=236, OpenFileDescriptor=445, MaxFileDescriptor=1048576, SystemLoadAverage=364, ProcessCount=11, AvailableMemoryMB=5831 2024-11-07T14:18:56,875 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
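
The teardown logged above for testGetAtomicity is the standard disable-then-delete cycle (pid=87 DISABLE, pid=91 DELETE, both reported completed by HBaseAdmin). A sketch of the equivalent client calls against the HBase 2.x Admin API, assuming a default Configuration on the classpath (this is not the test harness's own code):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Sketch of the disable/delete cycle seen above (pid=87 DISABLE, pid=91 DELETE).
    public final class DropTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestAcidGuarantees");
          admin.disableTable(tn); // DisableTableProcedure: unassign region(s), mark DISABLED in hbase:meta
          admin.deleteTable(tn);  // DeleteTableProcedure: archive region dirs, delete meta rows and descriptor
        }
      }
    }
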
2024-11-07T14:18:56,875 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T14:18:56,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=92, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-07T14:18:56,877 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=92, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-07T14:18:56,877 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:56,877 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 92 2024-11-07T14:18:56,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-11-07T14:18:56,877 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=92, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-07T14:18:56,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742151_1327 (size=960) 2024-11-07T14:18:56,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-11-07T14:18:57,157 ERROR [LeaseRenewer:jenkins.hfs.0@localhost:34807 {}] server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins.hfs.0@localhost:34807,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null at org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:18:57,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-11-07T14:18:57,285 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8 2024-11-07T14:18:57,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742152_1328 (size=53) 2024-11-07T14:18:57,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-11-07T14:18:57,690 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T14:18:57,691 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 4a29aaff371fad1ebbc570e5f0118052, disabling compactions & flushes 2024-11-07T14:18:57,691 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:18:57,691 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:18:57,691 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
after waiting 0 ms 2024-11-07T14:18:57,691 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:18:57,691 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:18:57,691 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:18:57,692 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=92, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-07T14:18:57,692 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1730989137692"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730989137692"}]},"ts":"1730989137692"} 2024-11-07T14:18:57,693 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-07T14:18:57,694 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=92, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-07T14:18:57,694 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730989137694"}]},"ts":"1730989137694"} 2024-11-07T14:18:57,695 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-07T14:18:57,699 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=4a29aaff371fad1ebbc570e5f0118052, ASSIGN}] 2024-11-07T14:18:57,700 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=4a29aaff371fad1ebbc570e5f0118052, ASSIGN 2024-11-07T14:18:57,701 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=4a29aaff371fad1ebbc570e5f0118052, ASSIGN; state=OFFLINE, location=69430dbfd73f,45917,1730989044081; forceNewPlan=false, retain=false 2024-11-07T14:18:57,851 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=4a29aaff371fad1ebbc570e5f0118052, regionState=OPENING, regionLocation=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:57,852 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE; OpenRegionProcedure 4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081}] 2024-11-07T14:18:57,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-11-07T14:18:58,004 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin 
connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:58,007 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:18:58,007 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(7285): Opening region: {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} 2024-11-07T14:18:58,008 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:18:58,008 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T14:18:58,008 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(7327): checking encryption for 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:18:58,008 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(7330): checking classloading for 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:18:58,009 INFO [StoreOpener-4a29aaff371fad1ebbc570e5f0118052-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:18:58,010 INFO [StoreOpener-4a29aaff371fad1ebbc570e5f0118052-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T14:18:58,010 INFO [StoreOpener-4a29aaff371fad1ebbc570e5f0118052-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4a29aaff371fad1ebbc570e5f0118052 columnFamilyName A 2024-11-07T14:18:58,010 DEBUG [StoreOpener-4a29aaff371fad1ebbc570e5f0118052-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:58,011 INFO [StoreOpener-4a29aaff371fad1ebbc570e5f0118052-1 {}] regionserver.HStore(327): Store=4a29aaff371fad1ebbc570e5f0118052/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 
2024-11-07T14:18:58,011 INFO [StoreOpener-4a29aaff371fad1ebbc570e5f0118052-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:18:58,011 INFO [StoreOpener-4a29aaff371fad1ebbc570e5f0118052-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T14:18:58,012 INFO [StoreOpener-4a29aaff371fad1ebbc570e5f0118052-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4a29aaff371fad1ebbc570e5f0118052 columnFamilyName B 2024-11-07T14:18:58,012 DEBUG [StoreOpener-4a29aaff371fad1ebbc570e5f0118052-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:58,012 INFO [StoreOpener-4a29aaff371fad1ebbc570e5f0118052-1 {}] regionserver.HStore(327): Store=4a29aaff371fad1ebbc570e5f0118052/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T14:18:58,012 INFO [StoreOpener-4a29aaff371fad1ebbc570e5f0118052-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:18:58,013 INFO [StoreOpener-4a29aaff371fad1ebbc570e5f0118052-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T14:18:58,013 INFO [StoreOpener-4a29aaff371fad1ebbc570e5f0118052-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4a29aaff371fad1ebbc570e5f0118052 columnFamilyName C 2024-11-07T14:18:58,013 DEBUG [StoreOpener-4a29aaff371fad1ebbc570e5f0118052-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:58,013 INFO [StoreOpener-4a29aaff371fad1ebbc570e5f0118052-1 {}] regionserver.HStore(327): Store=4a29aaff371fad1ebbc570e5f0118052/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T14:18:58,013 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:18:58,014 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:18:58,014 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:18:58,015 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T14:18:58,016 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(1085): writing seq id for 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:18:58,018 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T14:18:58,018 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(1102): Opened 4a29aaff371fad1ebbc570e5f0118052; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73048281, jitterRate=0.08850421011447906}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T14:18:58,019 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(1001): Region open journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:18:58,020 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052., pid=94, masterSystemTime=1730989138004 2024-11-07T14:18:58,021 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:18:58,021 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
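
The CREATE request for the testMobScanAtomicity table (pid=92) carries the descriptor printed above: families A, B and C with VERSIONS=1 and ROW bloom filters, BASIC in-memory compaction, and the deliberately small 128 KB memstore flush size that triggers the TableDescriptorChecker warning. A rough equivalent built with the 2.x descriptor builders (illustrative sketch; `admin` is assumed to be an open Admin handle, and this is not the test's builder code):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch of the descriptor logged for pid=92: A/B/C, VERSIONS=1, ROW bloom filters,
    // BASIC compacting memstore, 128 KB flush size (hence the MEMSTORE_FLUSHSIZE warning).
    final class CreateTableSketch {
      static void createTestTable(Admin admin) throws IOException {
        TableDescriptorBuilder table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setValue("hbase.hregion.compacting.memstore.type", "BASIC")
            .setMemStoreFlushSize(131072L);
        for (String cf : new String[] {"A", "B", "C"}) {
          table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(cf))
              .setMaxVersions(1)
              .setBloomFilterType(BloomType.ROW)
              .build());
        }
        admin.createTable(table.build()); // CreateTableProcedure: write FS layout, add to meta, assign region
      }
    }
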
2024-11-07T14:18:58,022 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=4a29aaff371fad1ebbc570e5f0118052, regionState=OPEN, openSeqNum=2, regionLocation=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:58,024 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-11-07T14:18:58,024 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; OpenRegionProcedure 4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 in 171 msec 2024-11-07T14:18:58,025 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=93, resume processing ppid=92 2024-11-07T14:18:58,025 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, ppid=92, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=4a29aaff371fad1ebbc570e5f0118052, ASSIGN in 325 msec 2024-11-07T14:18:58,026 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=92, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-07T14:18:58,026 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730989138026"}]},"ts":"1730989138026"} 2024-11-07T14:18:58,026 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-07T14:18:58,029 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=92, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-07T14:18:58,030 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1540 sec 2024-11-07T14:18:58,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-11-07T14:18:58,981 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 92 completed 2024-11-07T14:18:58,983 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0bf5e2f0 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1b82ba2a 2024-11-07T14:18:58,987 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3637e4c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:18:58,989 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:18:58,990 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55474, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:18:58,991 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-07T14:18:58,992 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50642, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-07T14:18:58,993 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-07T14:18:58,994 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T14:18:58,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=95, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-07T14:18:59,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742153_1329 (size=996) 2024-11-07T14:18:59,405 DEBUG [PEWorker-3 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-07T14:18:59,405 INFO [PEWorker-3 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-07T14:18:59,407 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-07T14:18:59,408 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=4a29aaff371fad1ebbc570e5f0118052, REOPEN/MOVE}] 2024-11-07T14:18:59,409 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=4a29aaff371fad1ebbc570e5f0118052, REOPEN/MOVE 2024-11-07T14:18:59,409 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=4a29aaff371fad1ebbc570e5f0118052, regionState=CLOSING, regionLocation=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:59,410 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-07T14:18:59,410 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=98, ppid=97, state=RUNNABLE; CloseRegionProcedure 4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081}] 2024-11-07T14:18:59,561 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:59,562 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] handler.UnassignRegionHandler(124): Close 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:18:59,562 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-07T14:18:59,562 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1681): Closing 4a29aaff371fad1ebbc570e5f0118052, disabling compactions & flushes 2024-11-07T14:18:59,562 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:18:59,562 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:18:59,562 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. after waiting 0 ms 2024-11-07T14:18:59,562 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
2024-11-07T14:18:59,566 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-07T14:18:59,566 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:18:59,566 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1635): Region close journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:18:59,566 WARN [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegionServer(3786): Not adding moved region record: 4a29aaff371fad1ebbc570e5f0118052 to self. 2024-11-07T14:18:59,568 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] handler.UnassignRegionHandler(170): Closed 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:18:59,568 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=4a29aaff371fad1ebbc570e5f0118052, regionState=CLOSED 2024-11-07T14:18:59,570 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=98, resume processing ppid=97 2024-11-07T14:18:59,570 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, ppid=97, state=SUCCESS; CloseRegionProcedure 4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 in 159 msec 2024-11-07T14:18:59,570 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=4a29aaff371fad1ebbc570e5f0118052, REOPEN/MOVE; state=CLOSED, location=69430dbfd73f,45917,1730989044081; forceNewPlan=false, retain=true 2024-11-07T14:18:59,721 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=4a29aaff371fad1ebbc570e5f0118052, regionState=OPENING, regionLocation=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:59,722 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=97, state=RUNNABLE; OpenRegionProcedure 4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081}] 2024-11-07T14:18:59,874 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:18:59,876 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
2024-11-07T14:18:59,876 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7285): Opening region: {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} 2024-11-07T14:18:59,877 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:18:59,877 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T14:18:59,877 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7327): checking encryption for 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:18:59,877 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7330): checking classloading for 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:18:59,878 INFO [StoreOpener-4a29aaff371fad1ebbc570e5f0118052-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:18:59,879 INFO [StoreOpener-4a29aaff371fad1ebbc570e5f0118052-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T14:18:59,879 INFO [StoreOpener-4a29aaff371fad1ebbc570e5f0118052-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4a29aaff371fad1ebbc570e5f0118052 columnFamilyName A 2024-11-07T14:18:59,880 DEBUG [StoreOpener-4a29aaff371fad1ebbc570e5f0118052-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:59,881 INFO [StoreOpener-4a29aaff371fad1ebbc570e5f0118052-1 {}] regionserver.HStore(327): Store=4a29aaff371fad1ebbc570e5f0118052/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T14:18:59,881 INFO [StoreOpener-4a29aaff371fad1ebbc570e5f0118052-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:18:59,882 INFO [StoreOpener-4a29aaff371fad1ebbc570e5f0118052-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T14:18:59,882 INFO [StoreOpener-4a29aaff371fad1ebbc570e5f0118052-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4a29aaff371fad1ebbc570e5f0118052 columnFamilyName B 2024-11-07T14:18:59,882 DEBUG [StoreOpener-4a29aaff371fad1ebbc570e5f0118052-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:59,882 INFO [StoreOpener-4a29aaff371fad1ebbc570e5f0118052-1 {}] regionserver.HStore(327): Store=4a29aaff371fad1ebbc570e5f0118052/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T14:18:59,882 INFO [StoreOpener-4a29aaff371fad1ebbc570e5f0118052-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:18:59,883 INFO [StoreOpener-4a29aaff371fad1ebbc570e5f0118052-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T14:18:59,883 INFO [StoreOpener-4a29aaff371fad1ebbc570e5f0118052-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4a29aaff371fad1ebbc570e5f0118052 columnFamilyName C 2024-11-07T14:18:59,883 DEBUG [StoreOpener-4a29aaff371fad1ebbc570e5f0118052-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:18:59,883 INFO [StoreOpener-4a29aaff371fad1ebbc570e5f0118052-1 {}] regionserver.HStore(327): Store=4a29aaff371fad1ebbc570e5f0118052/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T14:18:59,883 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:18:59,884 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:18:59,884 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:18:59,885 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T14:18:59,886 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1085): writing seq id for 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:18:59,887 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1102): Opened 4a29aaff371fad1ebbc570e5f0118052; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68063778, jitterRate=0.01422932744026184}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T14:18:59,888 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1001): Region open journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:18:59,888 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052., pid=99, masterSystemTime=1730989139874 2024-11-07T14:18:59,889 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:18:59,890 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
2024-11-07T14:18:59,890 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=4a29aaff371fad1ebbc570e5f0118052, regionState=OPEN, openSeqNum=5, regionLocation=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:59,892 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=97 2024-11-07T14:18:59,892 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=97, state=SUCCESS; OpenRegionProcedure 4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 in 169 msec 2024-11-07T14:18:59,893 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=96 2024-11-07T14:18:59,893 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=96, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=4a29aaff371fad1ebbc570e5f0118052, REOPEN/MOVE in 484 msec 2024-11-07T14:18:59,894 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-11-07T14:18:59,895 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 487 msec 2024-11-07T14:18:59,896 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 901 msec 2024-11-07T14:18:59,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-07T14:18:59,898 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x75b14fbd to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7b6cf8cb 2024-11-07T14:18:59,905 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72f422b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:18:59,905 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x62f74604 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7ec15031 2024-11-07T14:18:59,908 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2df33cdf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:18:59,909 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x49e13594 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3dd5b441 2024-11-07T14:18:59,912 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9f472e0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:18:59,913 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c54a0d3 to 
127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c336ea4 2024-11-07T14:18:59,915 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@167a78b0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:18:59,916 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3875c8c5 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1f94d721 2024-11-07T14:18:59,919 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5aee939b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:18:59,919 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x319559be to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1f49665c 2024-11-07T14:18:59,922 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2205f666, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:18:59,923 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c907e21 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@683f8469 2024-11-07T14:18:59,925 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6584e9ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:18:59,926 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x61ec0f48 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@75e4d3d0 2024-11-07T14:18:59,928 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37ec8e3b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:18:59,929 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7819b9e2 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2b308f62 2024-11-07T14:18:59,932 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@787e5169, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:18:59,932 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x47679076 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@68035c67 2024-11-07T14:18:59,935 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@627cad17, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:18:59,937 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:18:59,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=100, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees 2024-11-07T14:18:59,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-07T14:18:59,938 DEBUG [hconnection-0x6121c855-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:18:59,939 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=100, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:18:59,939 DEBUG [hconnection-0x1366a2a1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:18:59,939 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=100, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:18:59,939 DEBUG [hconnection-0x15bd9c22-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:18:59,939 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=100, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:18:59,940 DEBUG [hconnection-0x73d6a0cb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:18:59,941 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37058, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:18:59,941 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37046, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:18:59,941 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37066, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:18:59,942 DEBUG [hconnection-0x34ebf15-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:18:59,943 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37072, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:18:59,943 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37080, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:18:59,945 DEBUG [hconnection-0x657a99eb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:18:59,945 DEBUG [hconnection-0x7e9da36f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:18:59,946 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37082, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:18:59,946 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37092, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:18:59,948 DEBUG [hconnection-0x201b8726-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:18:59,949 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37108, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:18:59,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:18:59,951 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4a29aaff371fad1ebbc570e5f0118052 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-07T14:18:59,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=A 2024-11-07T14:18:59,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:59,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=B 2024-11-07T14:18:59,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:59,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=C 2024-11-07T14:18:59,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:18:59,952 DEBUG [hconnection-0x1fbeb7d4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:18:59,952 DEBUG [hconnection-0x493e46b0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:18:59,953 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37120, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:18:59,956 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37130, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:18:59,966 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:59,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989199966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:59,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:59,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989199966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:59,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:59,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989199966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:59,968 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:59,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989199968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:59,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:18:59,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989199971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:18:59,986 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107090f85176400494abe2908b9b62bf014_4a29aaff371fad1ebbc570e5f0118052 is 50, key is test_row_0/A:col10/1730989139949/Put/seqid=0 2024-11-07T14:18:59,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742154_1330 (size=12154) 2024-11-07T14:19:00,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-07T14:19:00,068 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:00,068 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:00,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989200068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:00,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989200068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:00,069 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:00,069 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:00,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989200068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:00,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989200069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:00,073 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:00,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989200072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:00,092 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:00,093 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-07T14:19:00,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:00,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:00,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:00,093 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:00,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:00,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:00,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-07T14:19:00,245 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:00,246 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-07T14:19:00,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:00,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:00,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:00,246 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:00,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:00,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:00,272 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:00,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989200270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:00,273 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:00,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989200271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:00,273 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:00,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989200271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:00,273 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:00,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989200271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:00,276 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:00,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989200275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:00,396 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:00,398 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:00,399 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-07T14:19:00,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:00,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:00,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:00,399 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:00,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:00,400 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107090f85176400494abe2908b9b62bf014_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107090f85176400494abe2908b9b62bf014_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:00,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:00,401 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/a52768ffc8084becb26a265852104203, store: [table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:00,402 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/a52768ffc8084becb26a265852104203 is 175, key is test_row_0/A:col10/1730989139949/Put/seqid=0 2024-11-07T14:19:00,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742155_1331 (size=30955) 2024-11-07T14:19:00,408 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/a52768ffc8084becb26a265852104203 2024-11-07T14:19:00,432 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/8b82dcb1ca4547ef982e357b0f2aeab4 is 50, key is test_row_0/B:col10/1730989139949/Put/seqid=0 2024-11-07T14:19:00,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742156_1332 (size=12001) 2024-11-07T14:19:00,436 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 
(bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/8b82dcb1ca4547ef982e357b0f2aeab4 2024-11-07T14:19:00,463 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/490b16d49de44e13b7d2ff9244bf9f18 is 50, key is test_row_0/C:col10/1730989139949/Put/seqid=0 2024-11-07T14:19:00,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742157_1333 (size=12001) 2024-11-07T14:19:00,469 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/490b16d49de44e13b7d2ff9244bf9f18 2024-11-07T14:19:00,474 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/a52768ffc8084becb26a265852104203 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/a52768ffc8084becb26a265852104203 2024-11-07T14:19:00,480 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/a52768ffc8084becb26a265852104203, entries=150, sequenceid=17, filesize=30.2 K 2024-11-07T14:19:00,480 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-11-07T14:19:00,481 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/8b82dcb1ca4547ef982e357b0f2aeab4 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/8b82dcb1ca4547ef982e357b0f2aeab4 2024-11-07T14:19:00,485 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/8b82dcb1ca4547ef982e357b0f2aeab4, entries=150, sequenceid=17, filesize=11.7 K 2024-11-07T14:19:00,485 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/490b16d49de44e13b7d2ff9244bf9f18 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/490b16d49de44e13b7d2ff9244bf9f18 2024-11-07T14:19:00,491 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/490b16d49de44e13b7d2ff9244bf9f18, entries=150, sequenceid=17, filesize=11.7 K 2024-11-07T14:19:00,492 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 4a29aaff371fad1ebbc570e5f0118052 in 542ms, sequenceid=17, compaction requested=false 2024-11-07T14:19:00,492 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-07T14:19:00,494 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:00,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-07T14:19:00,551 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:00,551 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-07T14:19:00,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
2024-11-07T14:19:00,552 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2837): Flushing 4a29aaff371fad1ebbc570e5f0118052 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-07T14:19:00,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=A 2024-11-07T14:19:00,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:00,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=B 2024-11-07T14:19:00,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:00,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=C 2024-11-07T14:19:00,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:00,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107091f65e9fc3b43b28f78fc400382cc65_4a29aaff371fad1ebbc570e5f0118052 is 50, key is test_row_0/A:col10/1730989139965/Put/seqid=0 2024-11-07T14:19:00,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742158_1334 (size=12154) 2024-11-07T14:19:00,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:00,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:00,578 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
as already flushing 2024-11-07T14:19:00,579 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107091f65e9fc3b43b28f78fc400382cc65_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107091f65e9fc3b43b28f78fc400382cc65_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:00,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/542f2f27d7754f23b111df5d04228534, store: [table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:00,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/542f2f27d7754f23b111df5d04228534 is 175, key is test_row_0/A:col10/1730989139965/Put/seqid=0 2024-11-07T14:19:00,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742159_1335 (size=30955) 2024-11-07T14:19:00,593 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:00,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989200586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:00,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:00,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989200586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:00,594 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:00,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989200586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:00,594 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:00,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989200588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:00,594 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:00,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989200590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:00,699 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:00,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989200695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:00,699 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:00,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989200695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:00,699 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:00,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989200695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:00,700 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:00,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989200696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:00,700 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:00,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989200696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:00,904 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:00,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989200900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:00,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:00,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989200902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:00,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:00,905 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:00,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989200902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:00,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989200902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:00,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:00,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989200902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:00,986 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/542f2f27d7754f23b111df5d04228534 2024-11-07T14:19:00,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/06b712a068494e33b769eacc7df3bb4e is 50, key is test_row_0/B:col10/1730989139965/Put/seqid=0 2024-11-07T14:19:00,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742160_1336 (size=12001) 2024-11-07T14:19:00,998 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/06b712a068494e33b769eacc7df3bb4e 2024-11-07T14:19:01,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/0fd248cc325d41e5b6391ba8aac39b9e is 50, key is 
test_row_0/C:col10/1730989139965/Put/seqid=0 2024-11-07T14:19:01,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742161_1337 (size=12001) 2024-11-07T14:19:01,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-07T14:19:01,208 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:01,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989201206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:01,208 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:01,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989201206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:01,218 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:01,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989201213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:01,218 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:01,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989201213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:01,218 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:01,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989201213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:01,288 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-07T14:19:01,413 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/0fd248cc325d41e5b6391ba8aac39b9e 2024-11-07T14:19:01,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/542f2f27d7754f23b111df5d04228534 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/542f2f27d7754f23b111df5d04228534 2024-11-07T14:19:01,421 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/542f2f27d7754f23b111df5d04228534, entries=150, sequenceid=41, filesize=30.2 K 2024-11-07T14:19:01,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/06b712a068494e33b769eacc7df3bb4e as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/06b712a068494e33b769eacc7df3bb4e 2024-11-07T14:19:01,427 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/06b712a068494e33b769eacc7df3bb4e, entries=150, sequenceid=41, filesize=11.7 K 2024-11-07T14:19:01,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/0fd248cc325d41e5b6391ba8aac39b9e as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/0fd248cc325d41e5b6391ba8aac39b9e 2024-11-07T14:19:01,432 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/0fd248cc325d41e5b6391ba8aac39b9e, entries=150, sequenceid=41, filesize=11.7 K 2024-11-07T14:19:01,433 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 4a29aaff371fad1ebbc570e5f0118052 in 881ms, sequenceid=41, compaction requested=false 2024-11-07T14:19:01,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2538): Flush status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:01,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:01,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=101 2024-11-07T14:19:01,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=101 2024-11-07T14:19:01,437 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=100 2024-11-07T14:19:01,437 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=100, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4970 sec 2024-11-07T14:19:01,439 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees in 1.5010 sec 2024-11-07T14:19:01,714 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4a29aaff371fad1ebbc570e5f0118052 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-07T14:19:01,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=A 2024-11-07T14:19:01,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:01,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=B 2024-11-07T14:19:01,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:01,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=C 2024-11-07T14:19:01,715 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:01,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:01,722 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107cbc467ff97d34b8ca7dfdda3671cd655_4a29aaff371fad1ebbc570e5f0118052 is 50, key is test_row_0/A:col10/1730989140589/Put/seqid=0 2024-11-07T14:19:01,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742162_1338 (size=14594) 2024-11-07T14:19:01,753 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:01,754 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:01,754 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:01,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989201746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:01,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989201746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:01,755 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:01,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989201747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:01,755 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:01,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989201748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:01,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:01,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989201754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:01,769 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107cbc467ff97d34b8ca7dfdda3671cd655_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107cbc467ff97d34b8ca7dfdda3671cd655_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:01,770 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/a5dbc32c0ab14f4c835e400dbbaea3b0, store: [table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:01,770 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/a5dbc32c0ab14f4c835e400dbbaea3b0 is 175, key is test_row_0/A:col10/1730989140589/Put/seqid=0 2024-11-07T14:19:01,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742163_1339 (size=39549) 2024-11-07T14:19:01,791 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/a5dbc32c0ab14f4c835e400dbbaea3b0 2024-11-07T14:19:01,797 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/229408cb3d804e25bfc393a594229423 is 50, key is test_row_0/B:col10/1730989140589/Put/seqid=0 2024-11-07T14:19:01,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742164_1340 (size=12001) 2024-11-07T14:19:01,858 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:01,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989201856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:01,858 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:01,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989201856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:01,858 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:01,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989201856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:01,859 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:01,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989201856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:01,866 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:01,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989201860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:02,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-07T14:19:02,043 INFO [Thread-1508 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 100 completed 2024-11-07T14:19:02,044 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:19:02,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees 2024-11-07T14:19:02,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-07T14:19:02,046 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:19:02,046 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:19:02,046 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:19:02,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:02,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989202059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:02,064 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:02,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989202060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:02,064 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:02,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989202060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:02,067 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:02,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989202064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:02,072 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:02,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989202068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:02,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-07T14:19:02,197 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:02,197 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-07T14:19:02,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:02,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:02,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:02,198 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:02,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:02,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:02,206 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/229408cb3d804e25bfc393a594229423 2024-11-07T14:19:02,213 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/794715e5800d4ae7a0971bff45c75bfa is 50, key is test_row_0/C:col10/1730989140589/Put/seqid=0 2024-11-07T14:19:02,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742165_1341 (size=12001) 2024-11-07T14:19:02,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-07T14:19:02,350 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:02,350 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-07T14:19:02,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:02,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
as already flushing 2024-11-07T14:19:02,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:02,351 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:02,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:02,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:02,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:02,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989202365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:02,371 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:02,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989202366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:02,372 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:02,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989202367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:02,374 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:02,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989202368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:02,379 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:02,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989202374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:02,503 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:02,503 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-07T14:19:02,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:02,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:02,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:02,504 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:02,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:02,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:02,622 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/794715e5800d4ae7a0971bff45c75bfa 2024-11-07T14:19:02,626 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/a5dbc32c0ab14f4c835e400dbbaea3b0 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/a5dbc32c0ab14f4c835e400dbbaea3b0 2024-11-07T14:19:02,630 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/a5dbc32c0ab14f4c835e400dbbaea3b0, entries=200, sequenceid=54, filesize=38.6 K 2024-11-07T14:19:02,631 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/229408cb3d804e25bfc393a594229423 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/229408cb3d804e25bfc393a594229423 2024-11-07T14:19:02,635 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/229408cb3d804e25bfc393a594229423, entries=150, sequenceid=54, 
filesize=11.7 K 2024-11-07T14:19:02,636 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/794715e5800d4ae7a0971bff45c75bfa as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/794715e5800d4ae7a0971bff45c75bfa 2024-11-07T14:19:02,641 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/794715e5800d4ae7a0971bff45c75bfa, entries=150, sequenceid=54, filesize=11.7 K 2024-11-07T14:19:02,642 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 4a29aaff371fad1ebbc570e5f0118052 in 928ms, sequenceid=54, compaction requested=true 2024-11-07T14:19:02,642 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:02,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4a29aaff371fad1ebbc570e5f0118052:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:19:02,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:02,642 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:02,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4a29aaff371fad1ebbc570e5f0118052:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:19:02,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:02,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4a29aaff371fad1ebbc570e5f0118052:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:19:02,642 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:02,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:02,644 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:02,644 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:02,644 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 
4a29aaff371fad1ebbc570e5f0118052/B is initiating minor compaction (all files) 2024-11-07T14:19:02,644 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): 4a29aaff371fad1ebbc570e5f0118052/A is initiating minor compaction (all files) 2024-11-07T14:19:02,644 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4a29aaff371fad1ebbc570e5f0118052/B in TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:02,644 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4a29aaff371fad1ebbc570e5f0118052/A in TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:02,644 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/8b82dcb1ca4547ef982e357b0f2aeab4, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/06b712a068494e33b769eacc7df3bb4e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/229408cb3d804e25bfc393a594229423] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp, totalSize=35.2 K 2024-11-07T14:19:02,644 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/a52768ffc8084becb26a265852104203, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/542f2f27d7754f23b111df5d04228534, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/a5dbc32c0ab14f4c835e400dbbaea3b0] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp, totalSize=99.1 K 2024-11-07T14:19:02,644 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=53.85 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:02,644 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
files: [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/a52768ffc8084becb26a265852104203, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/542f2f27d7754f23b111df5d04228534, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/a5dbc32c0ab14f4c835e400dbbaea3b0] 2024-11-07T14:19:02,644 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b82dcb1ca4547ef982e357b0f2aeab4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1730989139948 2024-11-07T14:19:02,645 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting a52768ffc8084becb26a265852104203, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1730989139948 2024-11-07T14:19:02,645 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 06b712a068494e33b769eacc7df3bb4e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1730989139965 2024-11-07T14:19:02,645 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 229408cb3d804e25bfc393a594229423, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1730989140586 2024-11-07T14:19:02,645 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 542f2f27d7754f23b111df5d04228534, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1730989139965 2024-11-07T14:19:02,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-07T14:19:02,648 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting a5dbc32c0ab14f4c835e400dbbaea3b0, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1730989140585 2024-11-07T14:19:02,656 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:02,657 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-07T14:19:02,657 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:02,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
2024-11-07T14:19:02,657 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2837): Flushing 4a29aaff371fad1ebbc570e5f0118052 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-07T14:19:02,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=A 2024-11-07T14:19:02,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:02,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=B 2024-11-07T14:19:02,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:02,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=C 2024-11-07T14:19:02,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:02,658 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4a29aaff371fad1ebbc570e5f0118052#B#compaction#290 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:19:02,659 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/857cf722086b4dd1be207099dfed8618 is 50, key is test_row_0/B:col10/1730989140589/Put/seqid=0 2024-11-07T14:19:02,660 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107d8be127e42d94c9fb96a779f6aa9ce16_4a29aaff371fad1ebbc570e5f0118052 store=[table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:02,662 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107d8be127e42d94c9fb96a779f6aa9ce16_4a29aaff371fad1ebbc570e5f0118052, store=[table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:02,663 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107d8be127e42d94c9fb96a779f6aa9ce16_4a29aaff371fad1ebbc570e5f0118052 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:02,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107607c0286bd334b3ca34f3b37fd42c194_4a29aaff371fad1ebbc570e5f0118052 is 50, key is test_row_0/A:col10/1730989141745/Put/seqid=0 2024-11-07T14:19:02,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742166_1342 (size=12104) 2024-11-07T14:19:02,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742167_1343 (size=4469) 2024-11-07T14:19:02,673 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4a29aaff371fad1ebbc570e5f0118052#A#compaction#291 average throughput is 1.53 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:19:02,674 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/8f2e9bc10990460b89fbec85338f9f8d is 175, key is test_row_0/A:col10/1730989140589/Put/seqid=0 2024-11-07T14:19:02,678 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/857cf722086b4dd1be207099dfed8618 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/857cf722086b4dd1be207099dfed8618 2024-11-07T14:19:02,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742168_1344 (size=12154) 2024-11-07T14:19:02,685 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4a29aaff371fad1ebbc570e5f0118052/B of 4a29aaff371fad1ebbc570e5f0118052 into 857cf722086b4dd1be207099dfed8618(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:19:02,685 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:02,685 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052., storeName=4a29aaff371fad1ebbc570e5f0118052/B, priority=13, startTime=1730989142642; duration=0sec 2024-11-07T14:19:02,685 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:02,685 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4a29aaff371fad1ebbc570e5f0118052:B 2024-11-07T14:19:02,685 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:02,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742169_1345 (size=31058) 2024-11-07T14:19:02,689 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:02,689 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 4a29aaff371fad1ebbc570e5f0118052/C is initiating minor compaction (all files) 2024-11-07T14:19:02,689 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4a29aaff371fad1ebbc570e5f0118052/C in TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
2024-11-07T14:19:02,689 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/490b16d49de44e13b7d2ff9244bf9f18, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/0fd248cc325d41e5b6391ba8aac39b9e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/794715e5800d4ae7a0971bff45c75bfa] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp, totalSize=35.2 K 2024-11-07T14:19:02,691 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 490b16d49de44e13b7d2ff9244bf9f18, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1730989139948 2024-11-07T14:19:02,691 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 0fd248cc325d41e5b6391ba8aac39b9e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1730989139965 2024-11-07T14:19:02,691 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 794715e5800d4ae7a0971bff45c75bfa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1730989140586 2024-11-07T14:19:02,700 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/8f2e9bc10990460b89fbec85338f9f8d as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/8f2e9bc10990460b89fbec85338f9f8d 2024-11-07T14:19:02,707 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4a29aaff371fad1ebbc570e5f0118052#C#compaction#293 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:19:02,707 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/f161e62e5f6e46efbdf6b11c07b755e9 is 50, key is test_row_0/C:col10/1730989140589/Put/seqid=0 2024-11-07T14:19:02,711 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4a29aaff371fad1ebbc570e5f0118052/A of 4a29aaff371fad1ebbc570e5f0118052 into 8f2e9bc10990460b89fbec85338f9f8d(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:19:02,711 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:02,711 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052., storeName=4a29aaff371fad1ebbc570e5f0118052/A, priority=13, startTime=1730989142642; duration=0sec 2024-11-07T14:19:02,711 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:02,711 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4a29aaff371fad1ebbc570e5f0118052:A 2024-11-07T14:19:02,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742170_1346 (size=12104) 2024-11-07T14:19:02,731 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/f161e62e5f6e46efbdf6b11c07b755e9 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/f161e62e5f6e46efbdf6b11c07b755e9 2024-11-07T14:19:02,739 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4a29aaff371fad1ebbc570e5f0118052/C of 4a29aaff371fad1ebbc570e5f0118052 into f161e62e5f6e46efbdf6b11c07b755e9(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:19:02,739 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:02,740 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052., storeName=4a29aaff371fad1ebbc570e5f0118052/C, priority=13, startTime=1730989142642; duration=0sec 2024-11-07T14:19:02,740 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:02,740 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4a29aaff371fad1ebbc570e5f0118052:C 2024-11-07T14:19:02,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:02,874 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:02,885 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:02,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989202883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:02,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:02,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989202884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:02,888 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:02,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989202884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:02,888 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:02,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989202885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:02,890 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:02,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989202886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:02,989 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:02,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989202987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:02,989 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:02,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989202987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:02,992 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:02,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989202989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:02,995 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:02,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989202990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:03,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,086 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107607c0286bd334b3ca34f3b37fd42c194_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107607c0286bd334b3ca34f3b37fd42c194_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:03,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/5b732ee92b6a42ec97256f3165314c7c, store: [table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:03,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/5b732ee92b6a42ec97256f3165314c7c is 175, key is test_row_0/A:col10/1730989141745/Put/seqid=0 2024-11-07T14:19:03,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742171_1347 (size=30955) 2024-11-07T14:19:03,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-07T14:19:03,195 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:03,195 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:03,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989203191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:03,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989203191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:03,197 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:03,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989203194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:03,200 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:03,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989203196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:03,501 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:03,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989203497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:03,501 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:03,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989203498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:03,502 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:03,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989203499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:03,502 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:03,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989203500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:03,511 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=77, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/5b732ee92b6a42ec97256f3165314c7c 2024-11-07T14:19:03,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/87366765bfbc4644b6b66607c52a6213 is 50, key is test_row_0/B:col10/1730989141745/Put/seqid=0 2024-11-07T14:19:03,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742172_1348 (size=12001) 2024-11-07T14:19:03,528 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/87366765bfbc4644b6b66607c52a6213 2024-11-07T14:19:03,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/4ad85c0514644bd28f4f1f988e681369 is 50, key is test_row_0/C:col10/1730989141745/Put/seqid=0 2024-11-07T14:19:03,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742173_1349 (size=12001) 2024-11-07T14:19:03,539 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/4ad85c0514644bd28f4f1f988e681369 2024-11-07T14:19:03,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/5b732ee92b6a42ec97256f3165314c7c as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/5b732ee92b6a42ec97256f3165314c7c 2024-11-07T14:19:03,557 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/5b732ee92b6a42ec97256f3165314c7c, entries=150, sequenceid=77, filesize=30.2 K 2024-11-07T14:19:03,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/87366765bfbc4644b6b66607c52a6213 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/87366765bfbc4644b6b66607c52a6213 2024-11-07T14:19:03,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,563 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/87366765bfbc4644b6b66607c52a6213, entries=150, sequenceid=77, filesize=11.7 K 
2024-11-07T14:19:03,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/4ad85c0514644bd28f4f1f988e681369 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/4ad85c0514644bd28f4f1f988e681369 2024-11-07T14:19:03,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,568 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/4ad85c0514644bd28f4f1f988e681369, entries=150, sequenceid=77, filesize=11.7 K 2024-11-07T14:19:03,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,569 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,569 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 4a29aaff371fad1ebbc570e5f0118052 in 912ms, sequenceid=77, compaction requested=false 2024-11-07T14:19:03,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2538): Flush status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:03,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
2024-11-07T14:19:03,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-11-07T14:19:03,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=103 2024-11-07T14:19:03,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,572 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-11-07T14:19:03,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:19:03,572 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5250 sec 2024-11-07T14:19:03,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,573 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees in 1.5280 sec 2024-11-07T14:19:03,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker [... identical StoreFileTrackerFactory(122) DEBUG entries repeated by RpcServer.default.FPBQ.Fifo handlers 0-2 (port 45917) from 2024-11-07T14:19:03,643 through 2024-11-07T14:19:03,716, each instantiating org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker ...] 2024-11-07T14:19:03,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:03,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,929 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4a29aaff371fad1ebbc570e5f0118052 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-07T14:19:03,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=A 2024-11-07T14:19:03,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:03,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=B 2024-11-07T14:19:03,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:03,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=C 2024-11-07T14:19:03,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:03,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,937 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107bb882038d32142bf948fff64cfcf996f_4a29aaff371fad1ebbc570e5f0118052 is 50, key is test_row_0/A:col10/1730989142884/Put/seqid=0 2024-11-07T14:19:03,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742174_1350 (size=12154) 2024-11-07T14:19:03,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:19:03,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:19:03,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:19:03,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:19:03,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:19:03,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:19:03,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:19:03,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:19:03,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:19:03,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:19:03,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:19:03,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:19:03,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:19:03,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:19:03,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:19:03,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:19:03,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:19:03,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:03,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:19:04,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:19:04,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-07T14:19:04,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,047 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:04,048 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:04,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989204042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:04,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989204043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:04,053 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:04,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989204047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:04,054 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:04,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989204047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:04,054 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:04,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989204048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:04,149 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:04,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989204149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:04,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-07T14:19:04,150 INFO [Thread-1508 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 102 completed 2024-11-07T14:19:04,151 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:19:04,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees 2024-11-07T14:19:04,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-07T14:19:04,154 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:19:04,155 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:04,155 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:19:04,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989204152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:04,155 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:19:04,158 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:04,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989204154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:04,159 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:04,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989204155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:04,159 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:04,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989204156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:04,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-07T14:19:04,307 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:04,308 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-07T14:19:04,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:04,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:04,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:04,308 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:04,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:04,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:04,348 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:04,352 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107bb882038d32142bf948fff64cfcf996f_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107bb882038d32142bf948fff64cfcf996f_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:04,353 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/ee1fedc2753147678401a4024cf7161c, store: [table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:04,354 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/ee1fedc2753147678401a4024cf7161c is 175, key is test_row_0/A:col10/1730989142884/Put/seqid=0 2024-11-07T14:19:04,354 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:04,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989204353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:04,359 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:04,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989204357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:04,361 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:04,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989204360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:04,363 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:04,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989204360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:04,364 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:04,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989204361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:04,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742175_1351 (size=30951) 2024-11-07T14:19:04,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-07T14:19:04,464 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:04,465 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-07T14:19:04,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:04,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:04,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
2024-11-07T14:19:04,465 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:04,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:04,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:04,617 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:04,618 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-07T14:19:04,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:04,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:04,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:04,618 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:04,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:04,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:04,659 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:04,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989204655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:04,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:04,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989204663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:04,669 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:04,669 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:04,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989204665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:04,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989204663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:04,669 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:04,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989204665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:04,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-07T14:19:04,770 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=94, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/ee1fedc2753147678401a4024cf7161c 2024-11-07T14:19:04,770 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:04,771 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-07T14:19:04,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:04,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:04,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
2024-11-07T14:19:04,771 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:04,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:04,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:04,784 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/46e55205e667445ebf2bafaf8f676696 is 50, key is test_row_0/B:col10/1730989142884/Put/seqid=0 2024-11-07T14:19:04,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742176_1352 (size=9657) 2024-11-07T14:19:04,923 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:04,924 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-07T14:19:04,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:04,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:04,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
2024-11-07T14:19:04,924 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:04,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:04,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:05,076 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:05,077 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-07T14:19:05,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:05,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:05,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:05,077 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:05,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:05,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:05,167 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:05,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989205163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:05,177 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:05,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989205172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:05,177 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:05,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989205173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:05,178 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:05,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989205174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:05,178 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:05,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989205174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:05,213 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/46e55205e667445ebf2bafaf8f676696 2024-11-07T14:19:05,232 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:05,233 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-07T14:19:05,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:05,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:05,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
2024-11-07T14:19:05,233 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:05,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:05,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:05,234 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/0e25b5c6a6cd4e6a903912cfe2067396 is 50, key is test_row_0/C:col10/1730989142884/Put/seqid=0 2024-11-07T14:19:05,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742177_1353 (size=9657) 2024-11-07T14:19:05,251 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/0e25b5c6a6cd4e6a903912cfe2067396 2024-11-07T14:19:05,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-07T14:19:05,259 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/ee1fedc2753147678401a4024cf7161c as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/ee1fedc2753147678401a4024cf7161c 2024-11-07T14:19:05,270 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/ee1fedc2753147678401a4024cf7161c, entries=150, sequenceid=94, filesize=30.2 K 2024-11-07T14:19:05,271 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/46e55205e667445ebf2bafaf8f676696 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/46e55205e667445ebf2bafaf8f676696 2024-11-07T14:19:05,276 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/46e55205e667445ebf2bafaf8f676696, entries=100, sequenceid=94, filesize=9.4 K 2024-11-07T14:19:05,277 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/0e25b5c6a6cd4e6a903912cfe2067396 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/0e25b5c6a6cd4e6a903912cfe2067396 2024-11-07T14:19:05,284 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/0e25b5c6a6cd4e6a903912cfe2067396, entries=100, sequenceid=94, filesize=9.4 K 2024-11-07T14:19:05,285 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=140.89 KB/144270 for 4a29aaff371fad1ebbc570e5f0118052 in 1356ms, sequenceid=94, compaction requested=true 2024-11-07T14:19:05,285 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:05,285 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:05,286 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92964 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:05,287 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): 4a29aaff371fad1ebbc570e5f0118052/A is initiating minor compaction (all files) 2024-11-07T14:19:05,287 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4a29aaff371fad1ebbc570e5f0118052/A in TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
2024-11-07T14:19:05,287 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/8f2e9bc10990460b89fbec85338f9f8d, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/5b732ee92b6a42ec97256f3165314c7c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/ee1fedc2753147678401a4024cf7161c] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp, totalSize=90.8 K 2024-11-07T14:19:05,287 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=53.85 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:05,287 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. files: [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/8f2e9bc10990460b89fbec85338f9f8d, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/5b732ee92b6a42ec97256f3165314c7c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/ee1fedc2753147678401a4024cf7161c] 2024-11-07T14:19:05,287 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8f2e9bc10990460b89fbec85338f9f8d, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1730989140586 2024-11-07T14:19:05,288 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b732ee92b6a42ec97256f3165314c7c, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1730989141745 2024-11-07T14:19:05,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4a29aaff371fad1ebbc570e5f0118052:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:19:05,289 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee1fedc2753147678401a4024cf7161c, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1730989142883 2024-11-07T14:19:05,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:05,289 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:05,290 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33762 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:05,291 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 4a29aaff371fad1ebbc570e5f0118052/B is initiating minor compaction (all files) 2024-11-07T14:19:05,291 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4a29aaff371fad1ebbc570e5f0118052/B in TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:05,291 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/857cf722086b4dd1be207099dfed8618, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/87366765bfbc4644b6b66607c52a6213, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/46e55205e667445ebf2bafaf8f676696] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp, totalSize=33.0 K 2024-11-07T14:19:05,291 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 857cf722086b4dd1be207099dfed8618, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1730989140586 2024-11-07T14:19:05,292 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 87366765bfbc4644b6b66607c52a6213, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1730989141745 2024-11-07T14:19:05,292 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 46e55205e667445ebf2bafaf8f676696, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1730989142884 2024-11-07T14:19:05,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4a29aaff371fad1ebbc570e5f0118052:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:19:05,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:05,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4a29aaff371fad1ebbc570e5f0118052:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:19:05,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:05,313 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:05,315 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
4a29aaff371fad1ebbc570e5f0118052#B#compaction#300 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:19:05,315 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/d4d89fe6fba246aeaf9836561171e40a is 50, key is test_row_0/B:col10/1730989142884/Put/seqid=0 2024-11-07T14:19:05,329 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107d463c383984d43ada4af057e88338336_4a29aaff371fad1ebbc570e5f0118052 store=[table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:05,334 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107d463c383984d43ada4af057e88338336_4a29aaff371fad1ebbc570e5f0118052, store=[table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:05,334 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107d463c383984d43ada4af057e88338336_4a29aaff371fad1ebbc570e5f0118052 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:05,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742178_1354 (size=12207) 2024-11-07T14:19:05,385 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:05,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742179_1355 (size=4469) 2024-11-07T14:19:05,388 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-07T14:19:05,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
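
The compactor entries above show MOB handling for family A: a MOB writer is created and then aborted "because there are no MOB cells", i.e. nothing in the compacted data crossed the MOB threshold, so all values stay in ordinary HFiles. As a rough illustration only (the test's actual table setup is not shown in this log), the sketch below builds a hypothetical descriptor with MOB enabled on family A and a 100 KB threshold; cells below that size would behave exactly as seen here.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
  public static TableDescriptor build() {
    // Hypothetical family "A" with MOB enabled; cells larger than 100 KB
    // would go to MOB files, smaller cells stay in regular HFiles, which is
    // why the compactor above aborts its MOB writer for this data set.
    ColumnFamilyDescriptor cfA = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("A"))
        .setMobEnabled(true)
        .setMobThreshold(100 * 1024L)
        .build();
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(cfA)
        .build();
  }
}
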
2024-11-07T14:19:05,388 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2837): Flushing 4a29aaff371fad1ebbc570e5f0118052 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-07T14:19:05,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=A 2024-11-07T14:19:05,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:05,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=B 2024-11-07T14:19:05,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:05,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=C 2024-11-07T14:19:05,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:05,397 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4a29aaff371fad1ebbc570e5f0118052#A#compaction#299 average throughput is 0.29 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:19:05,398 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/d19d7ab7589e4348a4400a863d656c9b is 175, key is test_row_0/A:col10/1730989142884/Put/seqid=0 2024-11-07T14:19:05,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742180_1356 (size=31268) 2024-11-07T14:19:05,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411078e52e20ee26b458b967a76ab785296f4_4a29aaff371fad1ebbc570e5f0118052 is 50, key is test_row_0/A:col10/1730989144046/Put/seqid=0 2024-11-07T14:19:05,454 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/d19d7ab7589e4348a4400a863d656c9b as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/d19d7ab7589e4348a4400a863d656c9b 2024-11-07T14:19:05,464 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4a29aaff371fad1ebbc570e5f0118052/A of 4a29aaff371fad1ebbc570e5f0118052 into d19d7ab7589e4348a4400a863d656c9b(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
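
The PressureAwareThroughputController entries report the effective compaction write rate (0.29 and 3.28 MB/second here) against a shared 53.85 MB/second limit, plus how often the compactor had to sleep. The sketch below is a minimal stand-in for that idea, a simple sleep-based byte-rate limiter, and is not HBase's actual controller; it only shows how a write path can be held to a bytes-per-second budget.

// Minimal sketch of a byte-rate throttle, loosely analogous to the
// pressure-aware throughput controller in the log; not HBase code.
public class SimpleRateLimiter {
  private final double maxBytesPerSecond;
  private long windowStartNanos = System.nanoTime();
  private long bytesInWindow = 0;

  public SimpleRateLimiter(double maxBytesPerSecond) {
    this.maxBytesPerSecond = maxBytesPerSecond;
  }

  /** Account for 'bytes' just written and sleep if we are ahead of budget. */
  public synchronized void control(long bytes) throws InterruptedException {
    bytesInWindow += bytes;
    double elapsedSec = (System.nanoTime() - windowStartNanos) / 1e9;
    double allowedBytes = maxBytesPerSecond * elapsedSec;
    if (bytesInWindow > allowedBytes) {
      long sleepMs = (long) (((bytesInWindow - allowedBytes) / maxBytesPerSecond) * 1000);
      Thread.sleep(sleepMs);
    }
    if (elapsedSec > 1.0) {          // roll the accounting window every second
      windowStartNanos = System.nanoTime();
      bytesInWindow = 0;
    }
  }
}
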
2024-11-07T14:19:05,464 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:05,464 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052., storeName=4a29aaff371fad1ebbc570e5f0118052/A, priority=13, startTime=1730989145285; duration=0sec 2024-11-07T14:19:05,464 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:05,464 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4a29aaff371fad1ebbc570e5f0118052:A 2024-11-07T14:19:05,464 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:05,466 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33762 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:05,466 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): 4a29aaff371fad1ebbc570e5f0118052/C is initiating minor compaction (all files) 2024-11-07T14:19:05,466 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4a29aaff371fad1ebbc570e5f0118052/C in TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:05,466 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/f161e62e5f6e46efbdf6b11c07b755e9, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/4ad85c0514644bd28f4f1f988e681369, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/0e25b5c6a6cd4e6a903912cfe2067396] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp, totalSize=33.0 K 2024-11-07T14:19:05,467 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting f161e62e5f6e46efbdf6b11c07b755e9, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1730989140586 2024-11-07T14:19:05,468 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ad85c0514644bd28f4f1f988e681369, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1730989141745 2024-11-07T14:19:05,468 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0e25b5c6a6cd4e6a903912cfe2067396, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1730989142884 2024-11-07T14:19:05,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:34705 is added to blk_1073742181_1357 (size=12154) 2024-11-07T14:19:05,506 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4a29aaff371fad1ebbc570e5f0118052#C#compaction#302 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:19:05,507 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/ac6a992a3cc04a6b9c5a1bbcfc284607 is 50, key is test_row_0/C:col10/1730989142884/Put/seqid=0 2024-11-07T14:19:05,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742182_1358 (size=12207) 2024-11-07T14:19:05,794 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/d4d89fe6fba246aeaf9836561171e40a as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/d4d89fe6fba246aeaf9836561171e40a 2024-11-07T14:19:05,799 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4a29aaff371fad1ebbc570e5f0118052/B of 4a29aaff371fad1ebbc570e5f0118052 into d4d89fe6fba246aeaf9836561171e40a(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
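
The ExploringCompactionPolicy entries above report that 3 store files totalling 33762 bytes were selected starting at candidate #0, after considering 1 permutation within the size ratio. As a simplified illustration of that selection style only (the real policy also honors min/max file counts, off-peak ratios, and more), the sketch below scans contiguous windows of candidate file sizes and keeps the cheapest window whose files all pass a size-ratio check.

import java.util.List;

/** Toy version of "exploring" compaction selection; illustrative only. */
public class ExploringSelectionSketch {
  /**
   * Returns [start, end) of the cheapest contiguous window of at least
   * minFiles files in which no file is larger than ratio * (sum of the
   * other files in the window); returns null if no window qualifies.
   */
  public static int[] select(List<Long> sizes, int minFiles, double ratio) {
    long bestTotal = Long.MAX_VALUE;
    int[] best = null;
    for (int start = 0; start < sizes.size(); start++) {
      for (int end = start + minFiles; end <= sizes.size(); end++) {
        List<Long> window = sizes.subList(start, end);
        long total = window.stream().mapToLong(Long::longValue).sum();
        boolean ratioOk = window.stream()
            .allMatch(s -> s <= ratio * (total - s));   // size-ratio check
        if (ratioOk && total < bestTotal) {             // prefer the cheapest window
          bestTotal = total;
          best = new int[] { start, end };
        }
      }
    }
    return best;
  }
}
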
2024-11-07T14:19:05,799 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:05,799 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052., storeName=4a29aaff371fad1ebbc570e5f0118052/B, priority=13, startTime=1730989145289; duration=0sec 2024-11-07T14:19:05,799 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:05,799 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4a29aaff371fad1ebbc570e5f0118052:B 2024-11-07T14:19:05,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:05,902 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411078e52e20ee26b458b967a76ab785296f4_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411078e52e20ee26b458b967a76ab785296f4_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:05,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/a793f5d379c64737b6d30fece7c53154, store: [table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:05,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/a793f5d379c64737b6d30fece7c53154 is 175, key is test_row_0/A:col10/1730989144046/Put/seqid=0 2024-11-07T14:19:05,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742183_1359 (size=30955) 2024-11-07T14:19:05,918 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=118, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/a793f5d379c64737b6d30fece7c53154 2024-11-07T14:19:05,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/e2669744124644b888b17a59671662ec is 50, key is test_row_0/B:col10/1730989144046/Put/seqid=0 2024-11-07T14:19:05,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742184_1360 (size=12001) 2024-11-07T14:19:05,945 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/e2669744124644b888b17a59671662ec 2024-11-07T14:19:05,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/cca6c99e67eb482eb0e5a070c51310bf is 50, key is test_row_0/C:col10/1730989144046/Put/seqid=0 2024-11-07T14:19:05,963 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/ac6a992a3cc04a6b9c5a1bbcfc284607 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/ac6a992a3cc04a6b9c5a1bbcfc284607 2024-11-07T14:19:05,980 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4a29aaff371fad1ebbc570e5f0118052/C of 4a29aaff371fad1ebbc570e5f0118052 into ac6a992a3cc04a6b9c5a1bbcfc284607(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
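
The flush records above (DefaultStoreFlusher writing ~47 KB per store at sequenceid=118) are driven server-side by a FlushRegionProcedure; the equivalent client-side trigger is an Admin flush. A minimal sketch, assuming a reachable cluster and the table name used in this test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; each region
      // server then writes its memstores out as new HFiles, as in the log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
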
2024-11-07T14:19:05,980 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:05,980 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052., storeName=4a29aaff371fad1ebbc570e5f0118052/C, priority=13, startTime=1730989145296; duration=0sec 2024-11-07T14:19:05,980 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:05,980 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4a29aaff371fad1ebbc570e5f0118052:C 2024-11-07T14:19:06,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742185_1361 (size=12001) 2024-11-07T14:19:06,006 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/cca6c99e67eb482eb0e5a070c51310bf 2024-11-07T14:19:06,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/a793f5d379c64737b6d30fece7c53154 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/a793f5d379c64737b6d30fece7c53154 2024-11-07T14:19:06,021 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/a793f5d379c64737b6d30fece7c53154, entries=150, sequenceid=118, filesize=30.2 K 2024-11-07T14:19:06,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/e2669744124644b888b17a59671662ec as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/e2669744124644b888b17a59671662ec 2024-11-07T14:19:06,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,022 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,026 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/e2669744124644b888b17a59671662ec, entries=150, sequenceid=118, filesize=11.7 K 2024-11-07T14:19:06,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/cca6c99e67eb482eb0e5a070c51310bf as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/cca6c99e67eb482eb0e5a070c51310bf 2024-11-07T14:19:06,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,033 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/cca6c99e67eb482eb0e5a070c51310bf, entries=150, sequenceid=118, filesize=11.7 K 2024-11-07T14:19:06,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,034 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for 4a29aaff371fad1ebbc570e5f0118052 in 646ms, sequenceid=118, compaction requested=false 2024-11-07T14:19:06,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:06,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
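
The repeated StoreFileTrackerFactory entries show the DefaultStoreFileTracker being instantiated for each store access during these RPCs. Which tracker implementation is used is configurable; my assumption (worth verifying against the running HBase release) is that the relevant key is "hbase.store.file-tracker.impl" and that it can be set at the table level, roughly as sketched below. This is an illustrative sketch, not part of this test's configuration.

import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class StoreFileTrackerConfigSketch {
  public static TableDescriptor withFileBasedTracker(TableDescriptor current) {
    // Assumption: "hbase.store.file-tracker.impl" selects the StoreFileTracker
    // implementation (DEFAULT is what this log shows); "FILE" is the
    // file-based tracker. Verify the key and values for your HBase version.
    return TableDescriptorBuilder.newBuilder(current)
        .setValue("hbase.store.file-tracker.impl", "FILE")
        .build();
  }
}
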
2024-11-07T14:19:06,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-11-07T14:19:06,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-11-07T14:19:06,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,038 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-11-07T14:19:06,038 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8810 sec 2024-11-07T14:19:06,038 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,039 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees in 1.8870 sec 2024-11-07T14:19:06,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,041 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,043 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,046 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,049 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,114 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,117 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,120 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,122 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,125 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,128 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,131 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,134 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,137 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,139 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,142 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,145 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,148 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,151 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,154 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,158 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,160 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,163 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,166 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,169 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,172 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,175 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,177 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,180 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,184 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,186 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,188 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,192 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,194 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,197 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,199 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,202 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,254 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,257 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-07T14:19:06,259 INFO [Thread-1508 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 104 completed 2024-11-07T14:19:06,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,260 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:19:06,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,260 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-11-07T14:19:06,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,262 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:19:06,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-07T14:19:06,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,262 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:19:06,263 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:19:06,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] 
regionserver.HRegion(8581): Flush requested on 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:06,286 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4a29aaff371fad1ebbc570e5f0118052 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-07T14:19:06,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=A 2024-11-07T14:19:06,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:06,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=B 2024-11-07T14:19:06,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:06,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=C 2024-11-07T14:19:06,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:06,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,301 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107b2872471091d46a1bf91b6a6c30242ff_4a29aaff371fad1ebbc570e5f0118052 is 50, key is 
test_row_0/A:col10/1730989146282/Put/seqid=0 2024-11-07T14:19:06,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742187_1363 (size=24558) 2024-11-07T14:19:06,334 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,342 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107b2872471091d46a1bf91b6a6c30242ff_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107b2872471091d46a1bf91b6a6c30242ff_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:06,343 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/59a9aec5ddba4a0fa561268ac92b2b66, store: [table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:06,343 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:06,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989206330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:06,344 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/59a9aec5ddba4a0fa561268ac92b2b66 is 175, key is test_row_0/A:col10/1730989146282/Put/seqid=0 2024-11-07T14:19:06,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742186_1362 (size=74195) 2024-11-07T14:19:06,347 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=132, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/59a9aec5ddba4a0fa561268ac92b2b66 2024-11-07T14:19:06,350 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:06,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989206341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:06,351 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:06,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989206342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:06,351 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:06,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989206344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:06,351 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:06,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989206344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:06,357 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/65595487756c4f1fa50ba21364d25010 is 50, key is test_row_0/B:col10/1730989146282/Put/seqid=0 2024-11-07T14:19:06,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-07T14:19:06,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742188_1364 (size=12151) 2024-11-07T14:19:06,377 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/65595487756c4f1fa50ba21364d25010 2024-11-07T14:19:06,386 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/ee6394027cbb45e28f6d25fe4153437e is 50, key is test_row_0/C:col10/1730989146282/Put/seqid=0 2024-11-07T14:19:06,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742189_1365 (size=12151) 2024-11-07T14:19:06,406 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/ee6394027cbb45e28f6d25fe4153437e 2024-11-07T14:19:06,413 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/59a9aec5ddba4a0fa561268ac92b2b66 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/59a9aec5ddba4a0fa561268ac92b2b66 2024-11-07T14:19:06,414 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:06,415 
DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-07T14:19:06,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:06,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:06,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:06,416 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:06,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:06,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:06,420 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/59a9aec5ddba4a0fa561268ac92b2b66, entries=400, sequenceid=132, filesize=72.5 K 2024-11-07T14:19:06,421 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/65595487756c4f1fa50ba21364d25010 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/65595487756c4f1fa50ba21364d25010 2024-11-07T14:19:06,426 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/65595487756c4f1fa50ba21364d25010, entries=150, sequenceid=132, filesize=11.9 K 2024-11-07T14:19:06,428 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/ee6394027cbb45e28f6d25fe4153437e as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/ee6394027cbb45e28f6d25fe4153437e 2024-11-07T14:19:06,433 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/ee6394027cbb45e28f6d25fe4153437e, entries=150, sequenceid=132, filesize=11.9 K 2024-11-07T14:19:06,434 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 4a29aaff371fad1ebbc570e5f0118052 in 148ms, sequenceid=132, compaction requested=true 2024-11-07T14:19:06,434 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:06,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4a29aaff371fad1ebbc570e5f0118052:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:19:06,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:06,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4a29aaff371fad1ebbc570e5f0118052:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:19:06,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:06,434 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:06,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
4a29aaff371fad1ebbc570e5f0118052:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:19:06,434 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:06,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:06,436 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 136418 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:06,436 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): 4a29aaff371fad1ebbc570e5f0118052/A is initiating minor compaction (all files) 2024-11-07T14:19:06,436 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4a29aaff371fad1ebbc570e5f0118052/A in TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:06,437 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/d19d7ab7589e4348a4400a863d656c9b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/a793f5d379c64737b6d30fece7c53154, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/59a9aec5ddba4a0fa561268ac92b2b66] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp, totalSize=133.2 K 2024-11-07T14:19:06,437 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=53.85 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:06,437 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
files: [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/d19d7ab7589e4348a4400a863d656c9b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/a793f5d379c64737b6d30fece7c53154, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/59a9aec5ddba4a0fa561268ac92b2b66] 2024-11-07T14:19:06,437 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:06,437 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 4a29aaff371fad1ebbc570e5f0118052/B is initiating minor compaction (all files) 2024-11-07T14:19:06,437 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4a29aaff371fad1ebbc570e5f0118052/B in TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:06,437 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/d4d89fe6fba246aeaf9836561171e40a, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/e2669744124644b888b17a59671662ec, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/65595487756c4f1fa50ba21364d25010] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp, totalSize=35.5 K 2024-11-07T14:19:06,438 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting d19d7ab7589e4348a4400a863d656c9b, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1730989141746 2024-11-07T14:19:06,438 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting d4d89fe6fba246aeaf9836561171e40a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1730989141746 2024-11-07T14:19:06,438 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting a793f5d379c64737b6d30fece7c53154, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1730989144040 2024-11-07T14:19:06,438 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting e2669744124644b888b17a59671662ec, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1730989144040 2024-11-07T14:19:06,439 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 59a9aec5ddba4a0fa561268ac92b2b66, keycount=400, bloomtype=ROW, size=72.5 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1730989146222 2024-11-07T14:19:06,439 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 
65595487756c4f1fa50ba21364d25010, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1730989146270 2024-11-07T14:19:06,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:06,451 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4a29aaff371fad1ebbc570e5f0118052 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-07T14:19:06,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=A 2024-11-07T14:19:06,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:06,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=B 2024-11-07T14:19:06,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:06,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=C 2024-11-07T14:19:06,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:06,456 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:06,461 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4a29aaff371fad1ebbc570e5f0118052#B#compaction#309 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:19:06,462 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/1bb0285d39054af8976251e9ee03e277 is 50, key is test_row_0/B:col10/1730989146282/Put/seqid=0 2024-11-07T14:19:06,471 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411071f34079621074886a32ef5e4dd960fc5_4a29aaff371fad1ebbc570e5f0118052 store=[table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:06,474 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411071f34079621074886a32ef5e4dd960fc5_4a29aaff371fad1ebbc570e5f0118052, store=[table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:06,474 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411071f34079621074886a32ef5e4dd960fc5_4a29aaff371fad1ebbc570e5f0118052 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:06,477 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107ba616d51ed0548dbb5ddbedaea3c9ce8_4a29aaff371fad1ebbc570e5f0118052 is 50, key is test_row_0/A:col10/1730989146342/Put/seqid=0 2024-11-07T14:19:06,486 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:06,486 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:06,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989206466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:06,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989206472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:06,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742190_1366 (size=12459) 2024-11-07T14:19:06,495 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:06,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989206486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:06,495 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:06,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989206486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:06,496 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:06,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989206486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:06,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742191_1367 (size=14794) 2024-11-07T14:19:06,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742192_1368 (size=4469) 2024-11-07T14:19:06,525 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4a29aaff371fad1ebbc570e5f0118052#A#compaction#308 average throughput is 0.35 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:19:06,526 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/d82b532ffa53417bb161d2078d782ee6 is 175, key is test_row_0/A:col10/1730989146282/Put/seqid=0 2024-11-07T14:19:06,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742193_1369 (size=31413) 2024-11-07T14:19:06,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-07T14:19:06,568 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:06,569 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-07T14:19:06,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:06,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:06,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:06,569 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:06,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:06,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:06,594 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:06,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989206587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:06,594 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:06,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989206588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:06,603 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:06,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989206596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:06,603 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:06,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989206596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:06,604 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:06,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989206597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:06,727 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:06,727 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-07T14:19:06,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:06,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:06,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:06,728 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:06,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:06,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:06,801 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:06,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989206796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:06,801 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:06,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989206796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:06,812 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:06,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989206805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:06,812 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:06,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989206805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:06,812 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:06,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989206805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:06,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-07T14:19:06,880 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:06,880 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-07T14:19:06,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:06,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:06,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:06,881 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:06,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:06,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:06,894 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/1bb0285d39054af8976251e9ee03e277 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/1bb0285d39054af8976251e9ee03e277 2024-11-07T14:19:06,899 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4a29aaff371fad1ebbc570e5f0118052/B of 4a29aaff371fad1ebbc570e5f0118052 into 1bb0285d39054af8976251e9ee03e277(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:19:06,899 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:06,899 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052., storeName=4a29aaff371fad1ebbc570e5f0118052/B, priority=13, startTime=1730989146434; duration=0sec 2024-11-07T14:19:06,899 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:06,899 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4a29aaff371fad1ebbc570e5f0118052:B 2024-11-07T14:19:06,899 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:06,901 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:06,901 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 4a29aaff371fad1ebbc570e5f0118052/C is initiating minor compaction (all files) 2024-11-07T14:19:06,901 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4a29aaff371fad1ebbc570e5f0118052/C in TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
2024-11-07T14:19:06,901 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/ac6a992a3cc04a6b9c5a1bbcfc284607, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/cca6c99e67eb482eb0e5a070c51310bf, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/ee6394027cbb45e28f6d25fe4153437e] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp, totalSize=35.5 K 2024-11-07T14:19:06,902 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting ac6a992a3cc04a6b9c5a1bbcfc284607, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1730989141746 2024-11-07T14:19:06,902 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting cca6c99e67eb482eb0e5a070c51310bf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1730989144040 2024-11-07T14:19:06,902 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting ee6394027cbb45e28f6d25fe4153437e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1730989146270 2024-11-07T14:19:06,907 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4a29aaff371fad1ebbc570e5f0118052#C#compaction#311 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:19:06,908 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/f79f6667a57f46bcb8761baac6fb335b is 50, key is test_row_0/C:col10/1730989146282/Put/seqid=0 2024-11-07T14:19:06,910 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:06,913 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107ba616d51ed0548dbb5ddbedaea3c9ce8_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107ba616d51ed0548dbb5ddbedaea3c9ce8_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:06,914 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/37169fb9a5594276accd9251b454d09e, store: [table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:06,915 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/37169fb9a5594276accd9251b454d09e is 175, key is test_row_0/A:col10/1730989146342/Put/seqid=0 2024-11-07T14:19:06,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742194_1370 (size=12459) 2024-11-07T14:19:06,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742195_1371 (size=39749) 2024-11-07T14:19:06,922 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=158, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/37169fb9a5594276accd9251b454d09e 2024-11-07T14:19:06,930 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/e070fb2a6c1d4003ae86144fc9674764 is 50, key is test_row_0/B:col10/1730989146342/Put/seqid=0 2024-11-07T14:19:06,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742196_1372 (size=12151) 2024-11-07T14:19:06,954 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/d82b532ffa53417bb161d2078d782ee6 as 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/d82b532ffa53417bb161d2078d782ee6 2024-11-07T14:19:06,958 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4a29aaff371fad1ebbc570e5f0118052/A of 4a29aaff371fad1ebbc570e5f0118052 into d82b532ffa53417bb161d2078d782ee6(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:19:06,958 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:06,958 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052., storeName=4a29aaff371fad1ebbc570e5f0118052/A, priority=13, startTime=1730989146434; duration=0sec 2024-11-07T14:19:06,958 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:06,958 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4a29aaff371fad1ebbc570e5f0118052:A 2024-11-07T14:19:07,033 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:07,033 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-07T14:19:07,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:07,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:07,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:07,034 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:07,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:07,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:07,106 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:07,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989207102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:07,106 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:07,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989207104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:07,113 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:07,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989207113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:07,116 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:07,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989207115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:07,116 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:07,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989207115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:07,186 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:07,187 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-07T14:19:07,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:07,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:07,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:07,187 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:07,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:07,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:07,325 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/f79f6667a57f46bcb8761baac6fb335b as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/f79f6667a57f46bcb8761baac6fb335b 2024-11-07T14:19:07,329 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4a29aaff371fad1ebbc570e5f0118052/C of 4a29aaff371fad1ebbc570e5f0118052 into f79f6667a57f46bcb8761baac6fb335b(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:19:07,330 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:07,330 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052., storeName=4a29aaff371fad1ebbc570e5f0118052/C, priority=13, startTime=1730989146434; duration=0sec 2024-11-07T14:19:07,330 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:07,330 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4a29aaff371fad1ebbc570e5f0118052:C 2024-11-07T14:19:07,336 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/e070fb2a6c1d4003ae86144fc9674764 2024-11-07T14:19:07,339 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:07,340 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-07T14:19:07,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:07,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:07,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
2024-11-07T14:19:07,340 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:07,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:07,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:07,348 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/750039bc94184818bb8a4604576b3473 is 50, key is test_row_0/C:col10/1730989146342/Put/seqid=0 2024-11-07T14:19:07,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742197_1373 (size=12151) 2024-11-07T14:19:07,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-07T14:19:07,493 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:07,494 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-07T14:19:07,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:07,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:07,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
2024-11-07T14:19:07,494 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:07,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:07,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:07,611 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:07,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989207608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:07,613 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:07,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989207609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:07,619 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:07,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989207616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:07,619 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:07,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989207618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:07,626 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:07,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989207622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:07,647 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:07,647 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-07T14:19:07,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:07,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:07,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:07,647 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:07,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:07,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:07,757 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/750039bc94184818bb8a4604576b3473 2024-11-07T14:19:07,762 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/37169fb9a5594276accd9251b454d09e as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/37169fb9a5594276accd9251b454d09e 2024-11-07T14:19:07,765 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/37169fb9a5594276accd9251b454d09e, entries=200, sequenceid=158, filesize=38.8 K 2024-11-07T14:19:07,766 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/e070fb2a6c1d4003ae86144fc9674764 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/e070fb2a6c1d4003ae86144fc9674764 2024-11-07T14:19:07,770 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/e070fb2a6c1d4003ae86144fc9674764, entries=150, 
sequenceid=158, filesize=11.9 K 2024-11-07T14:19:07,771 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/750039bc94184818bb8a4604576b3473 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/750039bc94184818bb8a4604576b3473 2024-11-07T14:19:07,775 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/750039bc94184818bb8a4604576b3473, entries=150, sequenceid=158, filesize=11.9 K 2024-11-07T14:19:07,776 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=60.38 KB/61830 for 4a29aaff371fad1ebbc570e5f0118052 in 1325ms, sequenceid=158, compaction requested=false 2024-11-07T14:19:07,776 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:07,800 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:07,800 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-07T14:19:07,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
2024-11-07T14:19:07,800 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing 4a29aaff371fad1ebbc570e5f0118052 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-07T14:19:07,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=A 2024-11-07T14:19:07,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:07,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=B 2024-11-07T14:19:07,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:07,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=C 2024-11-07T14:19:07,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:07,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107517b8d014d714a229e43e2f3e03b359c_4a29aaff371fad1ebbc570e5f0118052 is 50, key is test_row_0/A:col10/1730989146481/Put/seqid=0 2024-11-07T14:19:07,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742198_1374 (size=12304) 2024-11-07T14:19:08,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:08,226 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107517b8d014d714a229e43e2f3e03b359c_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107517b8d014d714a229e43e2f3e03b359c_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:08,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/6e2dd88832ae493693edf333388f386e, store: [table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:08,228 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/6e2dd88832ae493693edf333388f386e is 175, key is test_row_0/A:col10/1730989146481/Put/seqid=0 2024-11-07T14:19:08,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742199_1375 (size=31105) 2024-11-07T14:19:08,244 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=173, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/6e2dd88832ae493693edf333388f386e 2024-11-07T14:19:08,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/bef06d500e284b509142b8b6cd1e3440 is 50, key is test_row_0/B:col10/1730989146481/Put/seqid=0 2024-11-07T14:19:08,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742200_1376 (size=12151) 2024-11-07T14:19:08,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-07T14:19:08,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:08,616 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:08,649 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:08,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989208643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:08,649 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:08,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989208646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:08,651 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:08,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989208647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:08,653 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:08,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989208648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:08,653 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:08,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989208649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:08,657 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/bef06d500e284b509142b8b6cd1e3440 2024-11-07T14:19:08,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/a3dd20bf8ac745b79a913e4f815bf882 is 50, key is test_row_0/C:col10/1730989146481/Put/seqid=0 2024-11-07T14:19:08,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742201_1377 (size=12151) 2024-11-07T14:19:08,753 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:08,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989208750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:08,753 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:08,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989208750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:08,755 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:08,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989208752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:08,755 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:08,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989208754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:08,756 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:08,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989208754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:08,958 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:08,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989208954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:08,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:08,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989208955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:08,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:08,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989208956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:08,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:08,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989208957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:08,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:08,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989208957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:09,069 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/a3dd20bf8ac745b79a913e4f815bf882 2024-11-07T14:19:09,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/6e2dd88832ae493693edf333388f386e as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/6e2dd88832ae493693edf333388f386e 2024-11-07T14:19:09,076 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/6e2dd88832ae493693edf333388f386e, entries=150, sequenceid=173, filesize=30.4 K 2024-11-07T14:19:09,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/bef06d500e284b509142b8b6cd1e3440 as 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/bef06d500e284b509142b8b6cd1e3440 2024-11-07T14:19:09,080 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/bef06d500e284b509142b8b6cd1e3440, entries=150, sequenceid=173, filesize=11.9 K 2024-11-07T14:19:09,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/a3dd20bf8ac745b79a913e4f815bf882 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/a3dd20bf8ac745b79a913e4f815bf882 2024-11-07T14:19:09,084 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/a3dd20bf8ac745b79a913e4f815bf882, entries=150, sequenceid=173, filesize=11.9 K 2024-11-07T14:19:09,085 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 4a29aaff371fad1ebbc570e5f0118052 in 1285ms, sequenceid=173, compaction requested=true 2024-11-07T14:19:09,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:09,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
2024-11-07T14:19:09,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-11-07T14:19:09,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-11-07T14:19:09,087 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-11-07T14:19:09,088 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8230 sec 2024-11-07T14:19:09,089 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 2.8280 sec 2024-11-07T14:19:09,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:09,264 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4a29aaff371fad1ebbc570e5f0118052 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-07T14:19:09,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=A 2024-11-07T14:19:09,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:09,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=B 2024-11-07T14:19:09,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:09,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=C 2024-11-07T14:19:09,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:09,272 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411076069d012c19e4008bca684564e5a592b_4a29aaff371fad1ebbc570e5f0118052 is 50, key is test_row_0/A:col10/1730989148645/Put/seqid=0 2024-11-07T14:19:09,273 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:09,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989209267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:09,275 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:09,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989209270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:09,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742202_1378 (size=12304) 2024-11-07T14:19:09,276 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:09,278 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:09,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989209272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:09,279 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411076069d012c19e4008bca684564e5a592b_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411076069d012c19e4008bca684564e5a592b_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:09,280 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/f8dcdaf609c3478ab4d1a441101bc481, store: [table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:09,280 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:09,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989209273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:09,280 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/f8dcdaf609c3478ab4d1a441101bc481 is 175, key is test_row_0/A:col10/1730989148645/Put/seqid=0 2024-11-07T14:19:09,280 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:09,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989209273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:09,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742203_1379 (size=31105) 2024-11-07T14:19:09,291 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=199, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/f8dcdaf609c3478ab4d1a441101bc481 2024-11-07T14:19:09,298 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/638945916f2045a59ed95881e80d1e84 is 50, key is test_row_0/B:col10/1730989148645/Put/seqid=0 2024-11-07T14:19:09,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742204_1380 (size=12151) 2024-11-07T14:19:09,376 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:09,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989209375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:09,377 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:09,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989209376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:09,382 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:09,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989209379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:09,382 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:09,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989209381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:09,385 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:09,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989209381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:09,581 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:09,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989209578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:09,582 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:09,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989209578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:09,587 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:09,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989209583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:09,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:09,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989209584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:09,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:09,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989209587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:09,705 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/638945916f2045a59ed95881e80d1e84 2024-11-07T14:19:09,712 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/10b6a1b700a046d395db9ec48b869b54 is 50, key is test_row_0/C:col10/1730989148645/Put/seqid=0 2024-11-07T14:19:09,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742205_1381 (size=12151) 2024-11-07T14:19:09,720 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/10b6a1b700a046d395db9ec48b869b54 2024-11-07T14:19:09,724 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/f8dcdaf609c3478ab4d1a441101bc481 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/f8dcdaf609c3478ab4d1a441101bc481 2024-11-07T14:19:09,727 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/f8dcdaf609c3478ab4d1a441101bc481, entries=150, sequenceid=199, filesize=30.4 K 2024-11-07T14:19:09,728 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/638945916f2045a59ed95881e80d1e84 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/638945916f2045a59ed95881e80d1e84 2024-11-07T14:19:09,731 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/638945916f2045a59ed95881e80d1e84, entries=150, sequenceid=199, filesize=11.9 K 2024-11-07T14:19:09,731 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/10b6a1b700a046d395db9ec48b869b54 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/10b6a1b700a046d395db9ec48b869b54 2024-11-07T14:19:09,735 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/10b6a1b700a046d395db9ec48b869b54, entries=150, sequenceid=199, filesize=11.9 K 2024-11-07T14:19:09,735 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 4a29aaff371fad1ebbc570e5f0118052 in 472ms, sequenceid=199, compaction requested=true 2024-11-07T14:19:09,736 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:09,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4a29aaff371fad1ebbc570e5f0118052:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:19:09,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:09,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4a29aaff371fad1ebbc570e5f0118052:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:19:09,736 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:19:09,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:09,736 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:19:09,736 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 4a29aaff371fad1ebbc570e5f0118052:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:19:09,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:09,737 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133372 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:19:09,737 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): 4a29aaff371fad1ebbc570e5f0118052/A is initiating minor compaction (all files) 2024-11-07T14:19:09,737 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4a29aaff371fad1ebbc570e5f0118052/A in TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:09,737 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/d82b532ffa53417bb161d2078d782ee6, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/37169fb9a5594276accd9251b454d09e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/6e2dd88832ae493693edf333388f386e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/f8dcdaf609c3478ab4d1a441101bc481] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp, totalSize=130.2 K 2024-11-07T14:19:09,737 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=53.85 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:09,737 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
files: [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/d82b532ffa53417bb161d2078d782ee6, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/37169fb9a5594276accd9251b454d09e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/6e2dd88832ae493693edf333388f386e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/f8dcdaf609c3478ab4d1a441101bc481] 2024-11-07T14:19:09,738 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48912 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:19:09,738 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 4a29aaff371fad1ebbc570e5f0118052/B is initiating minor compaction (all files) 2024-11-07T14:19:09,738 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4a29aaff371fad1ebbc570e5f0118052/B in TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:09,738 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting d82b532ffa53417bb161d2078d782ee6, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1730989146270 2024-11-07T14:19:09,738 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/1bb0285d39054af8976251e9ee03e277, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/e070fb2a6c1d4003ae86144fc9674764, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/bef06d500e284b509142b8b6cd1e3440, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/638945916f2045a59ed95881e80d1e84] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp, totalSize=47.8 K 2024-11-07T14:19:09,738 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 1bb0285d39054af8976251e9ee03e277, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1730989146270 2024-11-07T14:19:09,738 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 37169fb9a5594276accd9251b454d09e, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1730989146341 2024-11-07T14:19:09,738 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting e070fb2a6c1d4003ae86144fc9674764, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1730989146342 2024-11-07T14:19:09,738 
DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6e2dd88832ae493693edf333388f386e, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1730989146470 2024-11-07T14:19:09,739 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting bef06d500e284b509142b8b6cd1e3440, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1730989146470 2024-11-07T14:19:09,739 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting f8dcdaf609c3478ab4d1a441101bc481, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1730989148645 2024-11-07T14:19:09,739 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 638945916f2045a59ed95881e80d1e84, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1730989148645 2024-11-07T14:19:09,745 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:09,750 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411072b4da342a81c477391ff36c01a883cc2_4a29aaff371fad1ebbc570e5f0118052 store=[table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:09,751 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4a29aaff371fad1ebbc570e5f0118052#B#compaction#321 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:19:09,752 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411072b4da342a81c477391ff36c01a883cc2_4a29aaff371fad1ebbc570e5f0118052, store=[table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:09,752 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411072b4da342a81c477391ff36c01a883cc2_4a29aaff371fad1ebbc570e5f0118052 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:09,752 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/8c10876db3f242fb88d64d16686f16d1 is 50, key is test_row_0/B:col10/1730989148645/Put/seqid=0 2024-11-07T14:19:09,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742207_1383 (size=12595) 2024-11-07T14:19:09,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742206_1382 (size=4469) 2024-11-07T14:19:09,763 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/8c10876db3f242fb88d64d16686f16d1 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/8c10876db3f242fb88d64d16686f16d1 2024-11-07T14:19:09,767 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 4a29aaff371fad1ebbc570e5f0118052/B of 4a29aaff371fad1ebbc570e5f0118052 into 8c10876db3f242fb88d64d16686f16d1(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:19:09,767 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:09,767 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052., storeName=4a29aaff371fad1ebbc570e5f0118052/B, priority=12, startTime=1730989149736; duration=0sec 2024-11-07T14:19:09,767 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:09,767 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4a29aaff371fad1ebbc570e5f0118052:B 2024-11-07T14:19:09,767 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:19:09,768 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48912 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:19:09,768 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 4a29aaff371fad1ebbc570e5f0118052/C is initiating minor compaction (all files) 2024-11-07T14:19:09,768 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4a29aaff371fad1ebbc570e5f0118052/C in TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:09,768 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/f79f6667a57f46bcb8761baac6fb335b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/750039bc94184818bb8a4604576b3473, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/a3dd20bf8ac745b79a913e4f815bf882, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/10b6a1b700a046d395db9ec48b869b54] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp, totalSize=47.8 K 2024-11-07T14:19:09,768 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting f79f6667a57f46bcb8761baac6fb335b, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1730989146270 2024-11-07T14:19:09,769 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 750039bc94184818bb8a4604576b3473, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1730989146342 2024-11-07T14:19:09,769 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting a3dd20bf8ac745b79a913e4f815bf882, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=173, earliestPutTs=1730989146470 2024-11-07T14:19:09,769 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 10b6a1b700a046d395db9ec48b869b54, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1730989148645 2024-11-07T14:19:09,776 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4a29aaff371fad1ebbc570e5f0118052#C#compaction#322 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:19:09,776 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/267ce9293c0c4775a61fb4f06be88762 is 50, key is test_row_0/C:col10/1730989148645/Put/seqid=0 2024-11-07T14:19:09,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742208_1384 (size=12595) 2024-11-07T14:19:09,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:09,886 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4a29aaff371fad1ebbc570e5f0118052 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-07T14:19:09,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=A 2024-11-07T14:19:09,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:09,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=B 2024-11-07T14:19:09,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:09,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=C 2024-11-07T14:19:09,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:09,892 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107c3a9c1f31f7e46ea8d34446b0e74c4ed_4a29aaff371fad1ebbc570e5f0118052 is 50, key is test_row_0/A:col10/1730989149272/Put/seqid=0 2024-11-07T14:19:09,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742209_1385 (size=14794) 2024-11-07T14:19:09,925 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:09,925 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:09,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989209918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:09,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989209919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:09,925 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:09,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989209923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:09,930 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:09,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989209925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:09,930 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:09,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989209925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:10,028 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:10,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989210026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:10,029 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:10,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989210026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:10,029 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:10,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989210026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:10,034 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:10,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989210031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:10,034 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:10,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989210031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:10,160 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4a29aaff371fad1ebbc570e5f0118052#A#compaction#320 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:19:10,161 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/fa5c2e9e39974106a2df5bb8b703f679 is 175, key is test_row_0/A:col10/1730989148645/Put/seqid=0 2024-11-07T14:19:10,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742210_1386 (size=31549) 2024-11-07T14:19:10,170 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/fa5c2e9e39974106a2df5bb8b703f679 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/fa5c2e9e39974106a2df5bb8b703f679 2024-11-07T14:19:10,175 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 4a29aaff371fad1ebbc570e5f0118052/A of 4a29aaff371fad1ebbc570e5f0118052 into fa5c2e9e39974106a2df5bb8b703f679(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:19:10,175 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:10,175 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052., storeName=4a29aaff371fad1ebbc570e5f0118052/A, priority=12, startTime=1730989149736; duration=0sec 2024-11-07T14:19:10,175 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:10,175 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4a29aaff371fad1ebbc570e5f0118052:A 2024-11-07T14:19:10,189 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/267ce9293c0c4775a61fb4f06be88762 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/267ce9293c0c4775a61fb4f06be88762 2024-11-07T14:19:10,193 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 4a29aaff371fad1ebbc570e5f0118052/C of 4a29aaff371fad1ebbc570e5f0118052 into 267ce9293c0c4775a61fb4f06be88762(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:19:10,193 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:10,193 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052., storeName=4a29aaff371fad1ebbc570e5f0118052/C, priority=12, startTime=1730989149736; duration=0sec 2024-11-07T14:19:10,193 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:10,193 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4a29aaff371fad1ebbc570e5f0118052:C 2024-11-07T14:19:10,233 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:10,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989210229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:10,234 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:10,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989210231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:10,234 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:10,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989210231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:10,240 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:10,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989210236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:10,241 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:10,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989210236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:10,303 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:10,306 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107c3a9c1f31f7e46ea8d34446b0e74c4ed_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107c3a9c1f31f7e46ea8d34446b0e74c4ed_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:10,307 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/7c2580f72e8d408f94ee5d7e91f94991, store: [table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:10,308 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/7c2580f72e8d408f94ee5d7e91f94991 is 175, key is test_row_0/A:col10/1730989149272/Put/seqid=0 2024-11-07T14:19:10,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742211_1387 (size=39749) 2024-11-07T14:19:10,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-07T14:19:10,367 INFO [Thread-1508 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-11-07T14:19:10,369 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:19:10,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-11-07T14:19:10,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-07T14:19:10,370 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:19:10,371 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:19:10,371 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:19:10,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-07T14:19:10,522 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:10,523 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-07T14:19:10,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:10,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:10,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:10,523 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:10,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:10,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:10,537 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:10,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989210536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:10,537 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:10,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989210536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:10,537 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:10,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989210537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:10,544 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:10,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989210541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:10,545 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:10,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989210542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:10,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-07T14:19:10,675 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:10,676 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-07T14:19:10,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:10,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:10,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:10,676 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:10,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:10,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:10,713 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=211, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/7c2580f72e8d408f94ee5d7e91f94991 2024-11-07T14:19:10,719 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/3f6726a08f2b4f2588414c60ce4c6541 is 50, key is test_row_0/B:col10/1730989149272/Put/seqid=0 2024-11-07T14:19:10,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742212_1388 (size=12151) 2024-11-07T14:19:10,828 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:10,828 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-07T14:19:10,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:10,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:10,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:10,829 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:10,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:10,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:10,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-07T14:19:10,981 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:10,981 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-07T14:19:10,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:10,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:10,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:10,981 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:10,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:10,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:11,040 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:11,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989211038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:11,041 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:11,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989211040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:11,045 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:11,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989211042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:11,050 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:11,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989211048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:11,050 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:11,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989211049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:11,123 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/3f6726a08f2b4f2588414c60ce4c6541 2024-11-07T14:19:11,129 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/bd154a7052ad45cc8ce20129f540db16 is 50, key is test_row_0/C:col10/1730989149272/Put/seqid=0 2024-11-07T14:19:11,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742213_1389 (size=12151) 2024-11-07T14:19:11,133 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:11,134 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-07T14:19:11,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:11,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:11,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:11,134 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:11,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:11,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:11,286 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:11,286 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-07T14:19:11,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:11,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:11,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:11,286 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:11,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:11,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:11,438 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:11,439 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-07T14:19:11,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:11,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:11,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:11,439 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:11,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:11,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:11,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-07T14:19:11,533 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/bd154a7052ad45cc8ce20129f540db16 2024-11-07T14:19:11,537 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/7c2580f72e8d408f94ee5d7e91f94991 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/7c2580f72e8d408f94ee5d7e91f94991 2024-11-07T14:19:11,540 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/7c2580f72e8d408f94ee5d7e91f94991, entries=200, sequenceid=211, filesize=38.8 K 2024-11-07T14:19:11,541 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/3f6726a08f2b4f2588414c60ce4c6541 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/3f6726a08f2b4f2588414c60ce4c6541 2024-11-07T14:19:11,544 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/3f6726a08f2b4f2588414c60ce4c6541, entries=150, sequenceid=211, filesize=11.9 K 2024-11-07T14:19:11,545 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/bd154a7052ad45cc8ce20129f540db16 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/bd154a7052ad45cc8ce20129f540db16 2024-11-07T14:19:11,548 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/bd154a7052ad45cc8ce20129f540db16, entries=150, sequenceid=211, filesize=11.9 K 2024-11-07T14:19:11,549 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 4a29aaff371fad1ebbc570e5f0118052 in 1664ms, sequenceid=211, compaction requested=false 2024-11-07T14:19:11,549 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:11,591 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:11,592 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-07T14:19:11,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
2024-11-07T14:19:11,592 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 4a29aaff371fad1ebbc570e5f0118052 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-07T14:19:11,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=A 2024-11-07T14:19:11,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:11,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=B 2024-11-07T14:19:11,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:11,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=C 2024-11-07T14:19:11,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:11,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411073a9bf13d465c412da4c446cfb92b6725_4a29aaff371fad1ebbc570e5f0118052 is 50, key is test_row_0/A:col10/1730989149918/Put/seqid=0 2024-11-07T14:19:11,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742214_1390 (size=12304) 2024-11-07T14:19:12,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:12,008 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411073a9bf13d465c412da4c446cfb92b6725_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411073a9bf13d465c412da4c446cfb92b6725_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:12,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/bc34a71dba3d4b3bb3d59c819ed1d099, store: [table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:12,010 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/bc34a71dba3d4b3bb3d59c819ed1d099 is 175, key is test_row_0/A:col10/1730989149918/Put/seqid=0 2024-11-07T14:19:12,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742215_1391 (size=31105) 2024-11-07T14:19:12,047 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:12,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:12,059 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:12,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989212054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:12,062 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:12,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989212058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:12,065 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:12,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989212058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:12,066 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:12,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989212059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:12,066 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:12,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989212059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:12,163 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:12,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989212160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:12,165 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:12,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989212163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:12,168 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:12,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989212166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:12,169 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:12,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989212167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:12,170 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:12,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989212167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:12,367 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:12,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989212364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:12,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:12,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989212367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:12,372 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:12,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989212369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:12,373 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:12,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989212370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:12,375 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:12,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989212371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:12,414 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=239, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/bc34a71dba3d4b3bb3d59c819ed1d099 2024-11-07T14:19:12,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/b4c5eaed581a40bcbb723f7b3a4d44c5 is 50, key is test_row_0/B:col10/1730989149918/Put/seqid=0 2024-11-07T14:19:12,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742216_1392 (size=12151) 2024-11-07T14:19:12,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-07T14:19:12,669 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:12,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989212669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:12,673 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:12,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989212671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:12,678 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:12,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989212674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:12,679 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:12,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989212675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:12,680 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:12,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989212678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:12,825 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/b4c5eaed581a40bcbb723f7b3a4d44c5 2024-11-07T14:19:12,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/98d4bc8d0547457c80dabbe05d2a93d1 is 50, key is test_row_0/C:col10/1730989149918/Put/seqid=0 2024-11-07T14:19:12,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742217_1393 (size=12151) 2024-11-07T14:19:13,175 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:13,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989213172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:13,180 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:13,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989213178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:13,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:13,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989213179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:13,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:13,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989213179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:13,184 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:13,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989213183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:13,236 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/98d4bc8d0547457c80dabbe05d2a93d1 2024-11-07T14:19:13,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/bc34a71dba3d4b3bb3d59c819ed1d099 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/bc34a71dba3d4b3bb3d59c819ed1d099 2024-11-07T14:19:13,243 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/bc34a71dba3d4b3bb3d59c819ed1d099, entries=150, sequenceid=239, filesize=30.4 K 2024-11-07T14:19:13,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/b4c5eaed581a40bcbb723f7b3a4d44c5 as 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/b4c5eaed581a40bcbb723f7b3a4d44c5 2024-11-07T14:19:13,247 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/b4c5eaed581a40bcbb723f7b3a4d44c5, entries=150, sequenceid=239, filesize=11.9 K 2024-11-07T14:19:13,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/98d4bc8d0547457c80dabbe05d2a93d1 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/98d4bc8d0547457c80dabbe05d2a93d1 2024-11-07T14:19:13,251 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/98d4bc8d0547457c80dabbe05d2a93d1, entries=150, sequenceid=239, filesize=11.9 K 2024-11-07T14:19:13,252 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=60.38 KB/61830 for 4a29aaff371fad1ebbc570e5f0118052 in 1659ms, sequenceid=239, compaction requested=true 2024-11-07T14:19:13,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:13,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
2024-11-07T14:19:13,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-11-07T14:19:13,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-11-07T14:19:13,254 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-11-07T14:19:13,254 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8820 sec 2024-11-07T14:19:13,255 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 2.8850 sec 2024-11-07T14:19:14,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:14,181 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4a29aaff371fad1ebbc570e5f0118052 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-07T14:19:14,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=A 2024-11-07T14:19:14,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:14,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=B 2024-11-07T14:19:14,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:14,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=C 2024-11-07T14:19:14,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:14,191 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411079326e8be43f74cf8a7114ed11da98ff8_4a29aaff371fad1ebbc570e5f0118052 is 50, key is test_row_0/A:col10/1730989152046/Put/seqid=0 2024-11-07T14:19:14,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742218_1394 (size=14794) 2024-11-07T14:19:14,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:14,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989214211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:14,219 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:14,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989214212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:14,219 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:14,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989214212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:14,221 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:14,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989214212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:14,221 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:14,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989214213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:14,317 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:14,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989214314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:14,323 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:14,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989214320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:14,323 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:14,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989214320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:14,323 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:14,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989214322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:14,323 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:14,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989214322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:14,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-07T14:19:14,474 INFO [Thread-1508 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-11-07T14:19:14,476 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:19:14,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees 2024-11-07T14:19:14,477 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:19:14,478 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:19:14,478 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:19:14,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-07T14:19:14,523 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:14,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989214519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:14,528 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:14,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989214524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:14,529 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:14,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989214525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:14,529 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:14,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989214525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:14,529 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:14,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989214526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:14,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-07T14:19:14,603 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:14,611 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411079326e8be43f74cf8a7114ed11da98ff8_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411079326e8be43f74cf8a7114ed11da98ff8_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:14,613 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/e6031128ae354b12b286d024ff2403ee, store: [table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:14,614 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/e6031128ae354b12b286d024ff2403ee is 175, key is test_row_0/A:col10/1730989152046/Put/seqid=0 2024-11-07T14:19:14,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742219_1395 (size=39749) 2024-11-07T14:19:14,621 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=252, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/e6031128ae354b12b286d024ff2403ee 2024-11-07T14:19:14,628 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/fde68338c7ed44c8b00e3df4f93a8b04 is 50, key is test_row_0/B:col10/1730989152046/Put/seqid=0 
2024-11-07T14:19:14,630 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:14,631 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-07T14:19:14,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:14,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:14,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:14,632 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:14,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:14,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:14,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742220_1396 (size=12151) 2024-11-07T14:19:14,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-07T14:19:14,784 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:14,785 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-07T14:19:14,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
2024-11-07T14:19:14,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:14,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:14,785 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:14,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:14,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:14,829 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:14,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989214825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:14,834 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:14,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989214830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:14,834 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:14,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989214830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:14,835 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:14,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989214831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:14,835 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:14,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989214831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:14,937 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:14,938 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-07T14:19:14,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:14,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:14,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:14,938 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:14,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:14,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:15,039 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/fde68338c7ed44c8b00e3df4f93a8b04 2024-11-07T14:19:15,046 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/1feb6688993948ce88d9b9c5673974f8 is 50, key is test_row_0/C:col10/1730989152046/Put/seqid=0 2024-11-07T14:19:15,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742221_1397 (size=12151) 2024-11-07T14:19:15,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-07T14:19:15,090 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:15,090 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-07T14:19:15,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:15,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
as already flushing 2024-11-07T14:19:15,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:15,091 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:15,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:15,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:15,243 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:15,243 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-07T14:19:15,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:15,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:15,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:15,244 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:15,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:15,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:15,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:15,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989215334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:15,339 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:15,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989215335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:15,340 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:15,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989215337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:15,341 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:15,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989215338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:15,342 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:15,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989215339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:15,396 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:15,396 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-07T14:19:15,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:15,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:15,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:15,397 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:15,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:15,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:15,450 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/1feb6688993948ce88d9b9c5673974f8 2024-11-07T14:19:15,453 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/e6031128ae354b12b286d024ff2403ee as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/e6031128ae354b12b286d024ff2403ee 2024-11-07T14:19:15,457 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/e6031128ae354b12b286d024ff2403ee, entries=200, sequenceid=252, filesize=38.8 K 2024-11-07T14:19:15,457 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/fde68338c7ed44c8b00e3df4f93a8b04 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/fde68338c7ed44c8b00e3df4f93a8b04 2024-11-07T14:19:15,460 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/fde68338c7ed44c8b00e3df4f93a8b04, entries=150, 
sequenceid=252, filesize=11.9 K 2024-11-07T14:19:15,461 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/1feb6688993948ce88d9b9c5673974f8 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/1feb6688993948ce88d9b9c5673974f8 2024-11-07T14:19:15,464 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/1feb6688993948ce88d9b9c5673974f8, entries=150, sequenceid=252, filesize=11.9 K 2024-11-07T14:19:15,465 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 4a29aaff371fad1ebbc570e5f0118052 in 1284ms, sequenceid=252, compaction requested=true 2024-11-07T14:19:15,465 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:15,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4a29aaff371fad1ebbc570e5f0118052:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:19:15,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:15,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4a29aaff371fad1ebbc570e5f0118052:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:19:15,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:15,465 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:19:15,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4a29aaff371fad1ebbc570e5f0118052:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:19:15,465 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:19:15,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:15,466 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 142152 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:19:15,466 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:19:15,466 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] 
regionserver.HStore(1540): 4a29aaff371fad1ebbc570e5f0118052/A is initiating minor compaction (all files) 2024-11-07T14:19:15,466 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 4a29aaff371fad1ebbc570e5f0118052/B is initiating minor compaction (all files) 2024-11-07T14:19:15,466 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4a29aaff371fad1ebbc570e5f0118052/A in TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:15,466 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4a29aaff371fad1ebbc570e5f0118052/B in TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:15,466 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/fa5c2e9e39974106a2df5bb8b703f679, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/7c2580f72e8d408f94ee5d7e91f94991, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/bc34a71dba3d4b3bb3d59c819ed1d099, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/e6031128ae354b12b286d024ff2403ee] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp, totalSize=138.8 K 2024-11-07T14:19:15,466 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=53.85 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:15,466 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/8c10876db3f242fb88d64d16686f16d1, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/3f6726a08f2b4f2588414c60ce4c6541, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/b4c5eaed581a40bcbb723f7b3a4d44c5, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/fde68338c7ed44c8b00e3df4f93a8b04] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp, totalSize=47.9 K 2024-11-07T14:19:15,466 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
files: [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/fa5c2e9e39974106a2df5bb8b703f679, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/7c2580f72e8d408f94ee5d7e91f94991, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/bc34a71dba3d4b3bb3d59c819ed1d099, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/e6031128ae354b12b286d024ff2403ee] 2024-11-07T14:19:15,467 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c10876db3f242fb88d64d16686f16d1, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1730989148645 2024-11-07T14:19:15,467 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting fa5c2e9e39974106a2df5bb8b703f679, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1730989148645 2024-11-07T14:19:15,467 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c2580f72e8d408f94ee5d7e91f94991, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1730989149269 2024-11-07T14:19:15,467 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 3f6726a08f2b4f2588414c60ce4c6541, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1730989149269 2024-11-07T14:19:15,467 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting b4c5eaed581a40bcbb723f7b3a4d44c5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1730989149915 2024-11-07T14:19:15,467 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting bc34a71dba3d4b3bb3d59c819ed1d099, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1730989149915 2024-11-07T14:19:15,468 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting e6031128ae354b12b286d024ff2403ee, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1730989152046 2024-11-07T14:19:15,468 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting fde68338c7ed44c8b00e3df4f93a8b04, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1730989152046 2024-11-07T14:19:15,474 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:15,477 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4a29aaff371fad1ebbc570e5f0118052#B#compaction#333 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:19:15,477 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/295d194b2af3439f84aa86c4b0ba0635 is 50, key is test_row_0/B:col10/1730989152046/Put/seqid=0 2024-11-07T14:19:15,478 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107083dbb0d8914434ab6af34095835a40c_4a29aaff371fad1ebbc570e5f0118052 store=[table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:15,480 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107083dbb0d8914434ab6af34095835a40c_4a29aaff371fad1ebbc570e5f0118052, store=[table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:15,480 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107083dbb0d8914434ab6af34095835a40c_4a29aaff371fad1ebbc570e5f0118052 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:15,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742222_1398 (size=12731) 2024-11-07T14:19:15,488 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/295d194b2af3439f84aa86c4b0ba0635 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/295d194b2af3439f84aa86c4b0ba0635 2024-11-07T14:19:15,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742223_1399 (size=4469) 2024-11-07T14:19:15,493 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 4a29aaff371fad1ebbc570e5f0118052/B of 4a29aaff371fad1ebbc570e5f0118052 into 295d194b2af3439f84aa86c4b0ba0635(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:19:15,493 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:15,493 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052., storeName=4a29aaff371fad1ebbc570e5f0118052/B, priority=12, startTime=1730989155465; duration=0sec 2024-11-07T14:19:15,493 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:15,493 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4a29aaff371fad1ebbc570e5f0118052:B 2024-11-07T14:19:15,493 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:19:15,495 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:19:15,495 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 4a29aaff371fad1ebbc570e5f0118052/C is initiating minor compaction (all files) 2024-11-07T14:19:15,495 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4a29aaff371fad1ebbc570e5f0118052/C in TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:15,495 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/267ce9293c0c4775a61fb4f06be88762, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/bd154a7052ad45cc8ce20129f540db16, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/98d4bc8d0547457c80dabbe05d2a93d1, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/1feb6688993948ce88d9b9c5673974f8] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp, totalSize=47.9 K 2024-11-07T14:19:15,495 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 267ce9293c0c4775a61fb4f06be88762, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1730989148645 2024-11-07T14:19:15,496 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting bd154a7052ad45cc8ce20129f540db16, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1730989149269 2024-11-07T14:19:15,496 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 98d4bc8d0547457c80dabbe05d2a93d1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=239, earliestPutTs=1730989149915 2024-11-07T14:19:15,496 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 1feb6688993948ce88d9b9c5673974f8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1730989152046 2024-11-07T14:19:15,503 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4a29aaff371fad1ebbc570e5f0118052#C#compaction#334 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:19:15,504 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/38fff3b8020940f7999d27b06da266ad is 50, key is test_row_0/C:col10/1730989152046/Put/seqid=0 2024-11-07T14:19:15,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742224_1400 (size=12731) 2024-11-07T14:19:15,518 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/38fff3b8020940f7999d27b06da266ad as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/38fff3b8020940f7999d27b06da266ad 2024-11-07T14:19:15,522 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 4a29aaff371fad1ebbc570e5f0118052/C of 4a29aaff371fad1ebbc570e5f0118052 into 38fff3b8020940f7999d27b06da266ad(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:19:15,522 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:15,522 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052., storeName=4a29aaff371fad1ebbc570e5f0118052/C, priority=12, startTime=1730989155465; duration=0sec 2024-11-07T14:19:15,522 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:15,522 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4a29aaff371fad1ebbc570e5f0118052:C 2024-11-07T14:19:15,548 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:15,549 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-07T14:19:15,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:15,549 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing 4a29aaff371fad1ebbc570e5f0118052 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-07T14:19:15,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=A 2024-11-07T14:19:15,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:15,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=B 2024-11-07T14:19:15,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:15,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=C 2024-11-07T14:19:15,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:15,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107c4932847ef1344a796bef2f624fefcd9_4a29aaff371fad1ebbc570e5f0118052 is 50, key is test_row_0/A:col10/1730989154211/Put/seqid=0 2024-11-07T14:19:15,560 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742225_1401 (size=12454) 2024-11-07T14:19:15,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-07T14:19:15,890 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4a29aaff371fad1ebbc570e5f0118052#A#compaction#332 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:19:15,891 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/f2d6faf86c44408cba3ff1b959354588 is 175, key is test_row_0/A:col10/1730989152046/Put/seqid=0 2024-11-07T14:19:15,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742226_1402 (size=31685) 2024-11-07T14:19:15,899 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/f2d6faf86c44408cba3ff1b959354588 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/f2d6faf86c44408cba3ff1b959354588 2024-11-07T14:19:15,903 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 4a29aaff371fad1ebbc570e5f0118052/A of 4a29aaff371fad1ebbc570e5f0118052 into f2d6faf86c44408cba3ff1b959354588(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:19:15,903 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:15,903 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052., storeName=4a29aaff371fad1ebbc570e5f0118052/A, priority=12, startTime=1730989155465; duration=0sec 2024-11-07T14:19:15,903 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:15,903 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4a29aaff371fad1ebbc570e5f0118052:A 2024-11-07T14:19:15,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:15,964 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107c4932847ef1344a796bef2f624fefcd9_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107c4932847ef1344a796bef2f624fefcd9_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:15,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/1632e9b54cd94860ba2d23318b72cdc4, store: [table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:15,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/1632e9b54cd94860ba2d23318b72cdc4 is 175, key is test_row_0/A:col10/1730989154211/Put/seqid=0 2024-11-07T14:19:15,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742227_1403 (size=31255) 2024-11-07T14:19:16,343 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:16,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:16,355 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:16,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989216353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:16,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:16,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989216354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:16,359 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:16,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989216354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:16,359 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:16,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989216355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:16,359 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:16,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989216355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:16,370 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=277, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/1632e9b54cd94860ba2d23318b72cdc4 2024-11-07T14:19:16,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/581112e436dc4dd7be1f191c7b7df145 is 50, key is test_row_0/B:col10/1730989154211/Put/seqid=0 2024-11-07T14:19:16,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742228_1404 (size=12301) 2024-11-07T14:19:16,381 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/581112e436dc4dd7be1f191c7b7df145 2024-11-07T14:19:16,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/4f727593f92441e6b174bcb861f631ff is 50, key is test_row_0/C:col10/1730989154211/Put/seqid=0 2024-11-07T14:19:16,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742229_1405 (size=12301) 2024-11-07T14:19:16,457 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:16,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989216456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:16,464 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:16,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989216459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:16,465 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:16,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989216460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:16,465 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:16,465 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:16,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989216460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:16,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989216460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:16,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-07T14:19:16,662 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:16,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989216659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:16,669 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:16,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989216665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:16,669 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:16,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989216666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:16,669 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:16,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989216667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:16,672 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-07T14:19:16,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989216667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081
2024-11-07T14:19:16,796 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/4f727593f92441e6b174bcb861f631ff
2024-11-07T14:19:16,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/1632e9b54cd94860ba2d23318b72cdc4 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/1632e9b54cd94860ba2d23318b72cdc4
2024-11-07T14:19:16,803 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/1632e9b54cd94860ba2d23318b72cdc4, entries=150, sequenceid=277, filesize=30.5 K
2024-11-07T14:19:16,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/581112e436dc4dd7be1f191c7b7df145 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/581112e436dc4dd7be1f191c7b7df145
2024-11-07T14:19:16,807 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/581112e436dc4dd7be1f191c7b7df145, entries=150, sequenceid=277, filesize=12.0 K
2024-11-07T14:19:16,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/4f727593f92441e6b174bcb861f631ff as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/4f727593f92441e6b174bcb861f631ff
2024-11-07T14:19:16,811 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/4f727593f92441e6b174bcb861f631ff, entries=150, sequenceid=277, filesize=12.0 K
2024-11-07T14:19:16,812 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 4a29aaff371fad1ebbc570e5f0118052 in 1262ms, sequenceid=277, compaction requested=false
2024-11-07T14:19:16,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for 4a29aaff371fad1ebbc570e5f0118052:
2024-11-07T14:19:16,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.
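The records above capture one complete flush cycle for region 4a29aaff371fad1ebbc570e5f0118052: while the memstore is over its 512.0 K limit, incoming Mutate calls are rejected with RegionTooBusyException, and once the A/B/C store files are committed the blocked writers can proceed. A minimal client-side sketch of that retry behaviour follows; the table name, family, row key, and backoff schedule are illustrative only (this is not the TestAcidGuarantees writer code, and the stock HBase client normally performs comparable retries internally for retryable exceptions).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionTooBusyRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Same shape as the rejected calls in the log: a small Put against family A.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;                    // illustrative starting backoff
      for (int attempt = 1; attempt <= 10; attempt++) {
        try {
          table.put(put);                      // may be rejected while the memstore is over its limit
          break;                               // write accepted once the flush has freed space
        } catch (RegionTooBusyException e) {
          // The region server is busy flushing; back off and retry,
          // which is roughly what the test clients in the log are doing.
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
    }
  }
}
```

The 512.0 K blocking threshold seen here comes from the test's deliberately small flush size; in a normal deployment it is derived from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier.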
2024-11-07T14:19:16,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111
2024-11-07T14:19:16,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=111
2024-11-07T14:19:16,814 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110
2024-11-07T14:19:16,814 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3350 sec
2024-11-07T14:19:16,816 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 2.3390 sec
2024-11-07T14:19:16,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 4a29aaff371fad1ebbc570e5f0118052
2024-11-07T14:19:16,966 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4a29aaff371fad1ebbc570e5f0118052 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB
2024-11-07T14:19:16,966 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=A
2024-11-07T14:19:16,967 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-07T14:19:16,967 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=B
2024-11-07T14:19:16,967 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-07T14:19:16,967 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=C
2024-11-07T14:19:16,967 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-07T14:19:16,976 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107520bd695a55942459e4bc8976e05f3fd_4a29aaff371fad1ebbc570e5f0118052 is 50, key is test_row_0/A:col10/1730989156965/Put/seqid=0
2024-11-07T14:19:16,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742230_1406 (size=14994)
2024-11-07T14:19:16,997 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:16,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989216991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:16,997 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:16,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989216991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:16,997 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:16,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989216992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:16,997 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:16,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989216992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:16,998 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:16,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989216993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:17,099 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:17,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989217098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:17,099 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:17,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989217098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:17,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:17,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989217098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:17,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:17,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989217098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:17,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:17,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989217099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:17,303 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:17,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989217301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:17,303 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:17,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989217301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:17,303 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:17,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989217301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:17,304 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:17,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989217301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:17,305 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:17,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989217302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:17,381 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:17,385 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107520bd695a55942459e4bc8976e05f3fd_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107520bd695a55942459e4bc8976e05f3fd_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:17,386 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/abad5cce5e344d5194b282abc563aadf, store: [table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:17,386 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/abad5cce5e344d5194b282abc563aadf is 175, key is test_row_0/A:col10/1730989156965/Put/seqid=0 2024-11-07T14:19:17,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742231_1407 (size=39949) 2024-11-07T14:19:17,606 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:17,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989217604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:17,608 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:17,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989217605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:17,608 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:17,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989217605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:17,608 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:17,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989217605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:17,609 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:17,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989217607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:17,794 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=293, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/abad5cce5e344d5194b282abc563aadf 2024-11-07T14:19:17,807 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/6945e89954634d2a878ff80858708dc4 is 50, key is test_row_0/B:col10/1730989156965/Put/seqid=0 2024-11-07T14:19:17,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742232_1408 (size=12301) 2024-11-07T14:19:17,812 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/6945e89954634d2a878ff80858708dc4 2024-11-07T14:19:17,820 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/39c97f238e64479cb26136fce3858d32 is 50, key is test_row_0/C:col10/1730989156965/Put/seqid=0 2024-11-07T14:19:17,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742233_1409 (size=12301) 2024-11-07T14:19:17,827 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/39c97f238e64479cb26136fce3858d32 2024-11-07T14:19:17,835 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/abad5cce5e344d5194b282abc563aadf as 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/abad5cce5e344d5194b282abc563aadf 2024-11-07T14:19:17,839 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/abad5cce5e344d5194b282abc563aadf, entries=200, sequenceid=293, filesize=39.0 K 2024-11-07T14:19:17,841 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/6945e89954634d2a878ff80858708dc4 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/6945e89954634d2a878ff80858708dc4 2024-11-07T14:19:17,850 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/6945e89954634d2a878ff80858708dc4, entries=150, sequenceid=293, filesize=12.0 K 2024-11-07T14:19:17,851 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/39c97f238e64479cb26136fce3858d32 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/39c97f238e64479cb26136fce3858d32 2024-11-07T14:19:17,856 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/39c97f238e64479cb26136fce3858d32, entries=150, sequenceid=293, filesize=12.0 K 2024-11-07T14:19:17,857 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 4a29aaff371fad1ebbc570e5f0118052 in 891ms, sequenceid=293, compaction requested=true 2024-11-07T14:19:17,857 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:17,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4a29aaff371fad1ebbc570e5f0118052:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:19:17,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:17,858 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:17,858 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:17,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4a29aaff371fad1ebbc570e5f0118052:B, priority=-2147483648, current under compaction 
store size is 2 2024-11-07T14:19:17,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:17,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4a29aaff371fad1ebbc570e5f0118052:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:19:17,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:17,859 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:17,859 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102889 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:17,859 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): 4a29aaff371fad1ebbc570e5f0118052/A is initiating minor compaction (all files) 2024-11-07T14:19:17,859 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 4a29aaff371fad1ebbc570e5f0118052/B is initiating minor compaction (all files) 2024-11-07T14:19:17,859 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4a29aaff371fad1ebbc570e5f0118052/A in TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:17,859 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4a29aaff371fad1ebbc570e5f0118052/B in TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
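The repeated RegionTooBusyException warnings above all originate in HRegion.checkResources (HRegion.java:5067 in these stack traces): once the region's memstore data size passes its blocking limit (512.0 K in this test run), incoming mutations are rejected with that exception and a flush is requested, which is why the warnings subside after the MemStoreFlusher entries complete. Below is a minimal sketch of that back-pressure check, assuming the HBase common jar is on the classpath; the field and method names (blockingMemStoreSize, memStoreDataSize, requestFlush) are illustrative stand-ins, not the real HRegion internals.

import org.apache.hadoop.hbase.RegionTooBusyException;

/** Simplified illustration of the memstore back-pressure seen in the warnings above. */
public class MemStoreBackPressureSketch {

  private final long blockingMemStoreSize;  // e.g. 512 * 1024 bytes for this test region
  private volatile long memStoreDataSize;   // bytes currently held in the region's memstores

  public MemStoreBackPressureSketch(long blockingMemStoreSize) {
    this.blockingMemStoreSize = blockingMemStoreSize;
  }

  /** Called before applying a mutation; mirrors the behaviour behind HRegion.checkResources. */
  void checkResources(String regionName, String serverName) throws RegionTooBusyException {
    if (memStoreDataSize > blockingMemStoreSize) {
      requestFlush();  // in HBase this hands the region to the MemStoreFlusher queue
      throw new RegionTooBusyException("Over memstore limit=" + blockingMemStoreSize
          + ", regionName=" + regionName + ", server=" + serverName);
    }
  }

  void add(long mutationSize) {
    memStoreDataSize += mutationSize;  // grows with each accepted write until a flush resets it
  }

  private void requestFlush() {
    // placeholder: the real region server enqueues the region for an asynchronous flush
  }
}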
2024-11-07T14:19:17,859 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/f2d6faf86c44408cba3ff1b959354588, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/1632e9b54cd94860ba2d23318b72cdc4, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/abad5cce5e344d5194b282abc563aadf] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp, totalSize=100.5 K 2024-11-07T14:19:17,859 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/295d194b2af3439f84aa86c4b0ba0635, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/581112e436dc4dd7be1f191c7b7df145, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/6945e89954634d2a878ff80858708dc4] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp, totalSize=36.5 K 2024-11-07T14:19:17,859 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=53.85 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:17,859 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
files: [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/f2d6faf86c44408cba3ff1b959354588, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/1632e9b54cd94860ba2d23318b72cdc4, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/abad5cce5e344d5194b282abc563aadf] 2024-11-07T14:19:17,860 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 295d194b2af3439f84aa86c4b0ba0635, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1730989152046 2024-11-07T14:19:17,860 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting f2d6faf86c44408cba3ff1b959354588, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1730989152046 2024-11-07T14:19:17,860 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 581112e436dc4dd7be1f191c7b7df145, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1730989154211 2024-11-07T14:19:17,860 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1632e9b54cd94860ba2d23318b72cdc4, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1730989154211 2024-11-07T14:19:17,861 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 6945e89954634d2a878ff80858708dc4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1730989156353 2024-11-07T14:19:17,861 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting abad5cce5e344d5194b282abc563aadf, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1730989156353 2024-11-07T14:19:17,870 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:17,871 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4a29aaff371fad1ebbc570e5f0118052#B#compaction#341 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:19:17,872 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/b5bc772817504901963a738ec374c8f0 is 50, key is test_row_0/B:col10/1730989156965/Put/seqid=0 2024-11-07T14:19:17,876 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107f4b4c2ce81b2454585eed4bd7d20aa0c_4a29aaff371fad1ebbc570e5f0118052 store=[table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:17,879 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107f4b4c2ce81b2454585eed4bd7d20aa0c_4a29aaff371fad1ebbc570e5f0118052, store=[table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:17,879 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107f4b4c2ce81b2454585eed4bd7d20aa0c_4a29aaff371fad1ebbc570e5f0118052 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:17,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742234_1410 (size=12983) 2024-11-07T14:19:17,890 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/b5bc772817504901963a738ec374c8f0 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/b5bc772817504901963a738ec374c8f0 2024-11-07T14:19:17,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742235_1411 (size=4469) 2024-11-07T14:19:17,895 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4a29aaff371fad1ebbc570e5f0118052#A#compaction#342 average throughput is 1.02 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:19:17,896 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4a29aaff371fad1ebbc570e5f0118052/B of 4a29aaff371fad1ebbc570e5f0118052 into b5bc772817504901963a738ec374c8f0(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
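The "Exploring compaction algorithm has selected 3 files ..." lines above record the policy accepting the whole candidate window because the files are "in ratio", i.e. no single file dwarfs the rest of the window. The following is a rough, self-contained sketch of that ratio test; it is a simplified stand-in for the real ExploringCompactionPolicy (which also weighs permutations, min/max file counts and size limits), and the 1.2 ratio and file sizes used in main are illustrative values only.

import java.util.List;

public class CompactionRatioSketch {

  /** True when every file is at most ratio * (combined size of the other files in the window). */
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;  // one file is too large relative to the rest; window rejected
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Roughly the B-family selection above: three small store files of similar size.
    System.out.println(filesInRatio(List.of(12_700L, 12_300L, 12_300L), 1.2));  // true
    // A window dominated by one large file would be rejected.
    System.out.println(filesInRatio(List.of(39_900L, 1_000L, 1_200L), 1.2));    // false
  }
}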
2024-11-07T14:19:17,896 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:17,896 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052., storeName=4a29aaff371fad1ebbc570e5f0118052/B, priority=13, startTime=1730989157858; duration=0sec 2024-11-07T14:19:17,897 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:17,897 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4a29aaff371fad1ebbc570e5f0118052:B 2024-11-07T14:19:17,897 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/1704f37b87ed4e6994793be6e066e503 is 175, key is test_row_0/A:col10/1730989156965/Put/seqid=0 2024-11-07T14:19:17,897 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:17,898 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:17,898 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 4a29aaff371fad1ebbc570e5f0118052/C is initiating minor compaction (all files) 2024-11-07T14:19:17,899 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4a29aaff371fad1ebbc570e5f0118052/C in TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
2024-11-07T14:19:17,899 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/38fff3b8020940f7999d27b06da266ad, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/4f727593f92441e6b174bcb861f631ff, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/39c97f238e64479cb26136fce3858d32] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp, totalSize=36.5 K 2024-11-07T14:19:17,899 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 38fff3b8020940f7999d27b06da266ad, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1730989152046 2024-11-07T14:19:17,899 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 4f727593f92441e6b174bcb861f631ff, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1730989154211 2024-11-07T14:19:17,900 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 39c97f238e64479cb26136fce3858d32, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1730989156353 2024-11-07T14:19:17,911 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4a29aaff371fad1ebbc570e5f0118052#C#compaction#343 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-07T14:19:17,912 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/e2a196f3b61c46e885ba0b0cae27755b is 50, key is test_row_0/C:col10/1730989156965/Put/seqid=0 2024-11-07T14:19:17,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742237_1413 (size=12983) 2024-11-07T14:19:17,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742236_1412 (size=31937) 2024-11-07T14:19:17,939 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/1704f37b87ed4e6994793be6e066e503 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/1704f37b87ed4e6994793be6e066e503 2024-11-07T14:19:17,944 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4a29aaff371fad1ebbc570e5f0118052/A of 4a29aaff371fad1ebbc570e5f0118052 into 1704f37b87ed4e6994793be6e066e503(size=31.2 K), total size for store is 31.2 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:19:17,944 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:17,944 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052., storeName=4a29aaff371fad1ebbc570e5f0118052/A, priority=13, startTime=1730989157858; duration=0sec 2024-11-07T14:19:17,944 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:17,944 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4a29aaff371fad1ebbc570e5f0118052:A 2024-11-07T14:19:18,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:18,111 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4a29aaff371fad1ebbc570e5f0118052 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-07T14:19:18,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=A 2024-11-07T14:19:18,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:18,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=B 2024-11-07T14:19:18,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:18,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=C 2024-11-07T14:19:18,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:18,119 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110750a7dee253544d92927571d215c733fa_4a29aaff371fad1ebbc570e5f0118052 is 50, key is test_row_0/A:col10/1730989158109/Put/seqid=0 2024-11-07T14:19:18,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742238_1414 (size=14994) 2024-11-07T14:19:18,124 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:18,128 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110750a7dee253544d92927571d215c733fa_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110750a7dee253544d92927571d215c733fa_4a29aaff371fad1ebbc570e5f0118052 
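The "FLUSH Renaming flushed file from ... to ..." and "Committing .tmp/... as ..." entries above show the flush writing each new HFile (and MOB file) under a .tmp directory first and only then moving it into the live store directory, so readers never observe a half-written file. Below is a bare-bones sketch of that write-then-rename commit using the Hadoop FileSystem API; the paths in main are hypothetical stand-ins shaped like the ones in this log, and error handling is reduced to a single check.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenRenameSketch {

  /** Promotes a fully written temp file into the store directory with one rename. */
  static Path commitStoreFile(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
    Path dest = new Path(storeDir, tmpFile.getName());
    // On HDFS a rename within one namespace is atomic, so the file either
    // appears complete under the store directory or not at all.
    if (!fs.rename(tmpFile, dest)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + dest);
    }
    return dest;
  }

  public static void main(String[] args) {
    // Hypothetical paths shaped like the ones above; no filesystem access happens here.
    Path tmp = new Path("/data/default/TestAcidGuarantees/region/.tmp/A/abad5cce5e344d5194b282abc563aadf");
    Path store = new Path("/data/default/TestAcidGuarantees/region/A");
    System.out.println("would commit " + tmp + " as " + new Path(store, tmp.getName()));
  }
}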
2024-11-07T14:19:18,129 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/3917cf68e2264c3a892d0487237f7eff, store: [table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:18,130 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/3917cf68e2264c3a892d0487237f7eff is 175, key is test_row_0/A:col10/1730989158109/Put/seqid=0 2024-11-07T14:19:18,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:18,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989218123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:18,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:18,131 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:18,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989218125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:18,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989218125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:18,131 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:18,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989218127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:18,132 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:18,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989218128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:18,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742239_1415 (size=39949) 2024-11-07T14:19:18,139 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=317, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/3917cf68e2264c3a892d0487237f7eff 2024-11-07T14:19:18,146 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/3e58e2a8f36e47018653614781e89b54 is 50, key is test_row_0/B:col10/1730989158109/Put/seqid=0 2024-11-07T14:19:18,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742240_1416 (size=12301) 2024-11-07T14:19:18,236 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:18,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989218232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:18,236 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:18,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989218232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:18,236 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:18,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989218232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:18,236 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:18,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989218232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:18,236 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:18,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989218233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:18,335 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/e2a196f3b61c46e885ba0b0cae27755b as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/e2a196f3b61c46e885ba0b0cae27755b 2024-11-07T14:19:18,341 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4a29aaff371fad1ebbc570e5f0118052/C of 4a29aaff371fad1ebbc570e5f0118052 into e2a196f3b61c46e885ba0b0cae27755b(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:19:18,341 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:18,341 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052., storeName=4a29aaff371fad1ebbc570e5f0118052/C, priority=13, startTime=1730989157858; duration=0sec 2024-11-07T14:19:18,341 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:18,341 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4a29aaff371fad1ebbc570e5f0118052:C 2024-11-07T14:19:18,437 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:18,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989218437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:18,438 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:18,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989218437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:18,438 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:18,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989218437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:18,440 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:18,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989218438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:18,441 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:18,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989218438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:18,555 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/3e58e2a8f36e47018653614781e89b54 2024-11-07T14:19:18,561 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/d5b72b4b6bfc44d2a1f2b5806eafdfb7 is 50, key is test_row_0/C:col10/1730989158109/Put/seqid=0 2024-11-07T14:19:18,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742241_1417 (size=12301) 2024-11-07T14:19:18,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-07T14:19:18,586 INFO [Thread-1508 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-11-07T14:19:18,587 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:19:18,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-11-07T14:19:18,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-07T14:19:18,589 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:19:18,589 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:19:18,589 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-11-07T14:19:18,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-07T14:19:18,741 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:18,741 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:18,741 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-07T14:19:18,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989218739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:18,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:18,741 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:18,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:18,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989218739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:18,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:18,742 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:18,742 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:18,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989218740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:18,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:18,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:18,743 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:18,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989218741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:18,745 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:18,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989218742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:18,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-07T14:19:18,894 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:18,894 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-07T14:19:18,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:18,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:18,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:18,895 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:18,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:18,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:18,966 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/d5b72b4b6bfc44d2a1f2b5806eafdfb7 2024-11-07T14:19:18,970 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/3917cf68e2264c3a892d0487237f7eff as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/3917cf68e2264c3a892d0487237f7eff 2024-11-07T14:19:18,974 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/3917cf68e2264c3a892d0487237f7eff, entries=200, sequenceid=317, filesize=39.0 K 2024-11-07T14:19:18,975 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/3e58e2a8f36e47018653614781e89b54 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/3e58e2a8f36e47018653614781e89b54 2024-11-07T14:19:18,978 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/3e58e2a8f36e47018653614781e89b54, entries=150, sequenceid=317, filesize=12.0 K 2024-11-07T14:19:18,978 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/d5b72b4b6bfc44d2a1f2b5806eafdfb7 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/d5b72b4b6bfc44d2a1f2b5806eafdfb7 2024-11-07T14:19:18,982 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/d5b72b4b6bfc44d2a1f2b5806eafdfb7, entries=150, sequenceid=317, filesize=12.0 K 2024-11-07T14:19:18,984 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 4a29aaff371fad1ebbc570e5f0118052 in 874ms, sequenceid=317, compaction requested=false 2024-11-07T14:19:18,985 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:19,047 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
69430dbfd73f,45917,1730989044081 2024-11-07T14:19:19,047 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-07T14:19:19,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:19,048 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing 4a29aaff371fad1ebbc570e5f0118052 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-07T14:19:19,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=A 2024-11-07T14:19:19,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:19,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=B 2024-11-07T14:19:19,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:19,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=C 2024-11-07T14:19:19,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:19,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107225e3d6c632f463baf52de0c9bd3d506_4a29aaff371fad1ebbc570e5f0118052 is 50, key is test_row_0/A:col10/1730989158122/Put/seqid=0 2024-11-07T14:19:19,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742242_1418 (size=12454) 2024-11-07T14:19:19,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-07T14:19:19,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:19,248 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. as already flushing 2024-11-07T14:19:19,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:19,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989219268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:19,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:19,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989219269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:19,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:19,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989219270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:19,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:19,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989219271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:19,278 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:19,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989219271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:19,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:19,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989219375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:19,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:19,379 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:19,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989219376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:19,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989219376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:19,380 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:19,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989219376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:19,384 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:19,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989219380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:19,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:19,463 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107225e3d6c632f463baf52de0c9bd3d506_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107225e3d6c632f463baf52de0c9bd3d506_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:19,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/d6a28e1c058a45dbaf63b9bdb6edd8b5, store: [table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:19,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/d6a28e1c058a45dbaf63b9bdb6edd8b5 is 175, key is test_row_0/A:col10/1730989158122/Put/seqid=0 2024-11-07T14:19:19,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742243_1419 (size=31255) 2024-11-07T14:19:19,583 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:19,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989219580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:19,583 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:19,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989219580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:19,583 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:19,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989219581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:19,584 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:19,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989219582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:19,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:19,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989219586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:19,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-07T14:19:19,869 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=332, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/d6a28e1c058a45dbaf63b9bdb6edd8b5 2024-11-07T14:19:19,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/474c9cc084c74d84a9cc036f09f340e1 is 50, key is test_row_0/B:col10/1730989158122/Put/seqid=0 2024-11-07T14:19:19,887 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:19,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37082 deadline: 1730989219884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:19,887 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:19,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37120 deadline: 1730989219886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:19,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742244_1420 (size=12301) 2024-11-07T14:19:19,888 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/474c9cc084c74d84a9cc036f09f340e1 2024-11-07T14:19:19,893 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:19,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37046 deadline: 1730989219886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:19,893 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:19,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1730989219887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:19,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/89184b94e1e74339ab7b3b1fa43c5383 is 50, key is test_row_0/C:col10/1730989158122/Put/seqid=0 2024-11-07T14:19:19,895 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:19,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1730989219890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:19,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742245_1421 (size=12301) 2024-11-07T14:19:19,928 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/89184b94e1e74339ab7b3b1fa43c5383 2024-11-07T14:19:19,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/d6a28e1c058a45dbaf63b9bdb6edd8b5 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/d6a28e1c058a45dbaf63b9bdb6edd8b5 2024-11-07T14:19:19,939 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/d6a28e1c058a45dbaf63b9bdb6edd8b5, entries=150, sequenceid=332, filesize=30.5 K 2024-11-07T14:19:19,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/474c9cc084c74d84a9cc036f09f340e1 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/474c9cc084c74d84a9cc036f09f340e1 2024-11-07T14:19:19,942 DEBUG [Thread-1515 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7819b9e2 to 127.0.0.1:51818 2024-11-07T14:19:19,942 DEBUG [Thread-1515 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:19:19,946 DEBUG [Thread-1513 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x61ec0f48 to 127.0.0.1:51818 2024-11-07T14:19:19,946 DEBUG [Thread-1513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:19:19,949 DEBUG [Thread-1517 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x47679076 to 127.0.0.1:51818 2024-11-07T14:19:19,949 DEBUG [Thread-1511 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c907e21 to 127.0.0.1:51818 2024-11-07T14:19:19,949 DEBUG [Thread-1517 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:19:19,949 DEBUG [Thread-1511 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:19:19,950 DEBUG [Thread-1509 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x319559be to 127.0.0.1:51818 2024-11-07T14:19:19,950 DEBUG [Thread-1509 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:19:19,953 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/474c9cc084c74d84a9cc036f09f340e1, entries=150, sequenceid=332, filesize=12.0 K 2024-11-07T14:19:19,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/89184b94e1e74339ab7b3b1fa43c5383 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/89184b94e1e74339ab7b3b1fa43c5383 2024-11-07T14:19:19,957 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/89184b94e1e74339ab7b3b1fa43c5383, entries=150, sequenceid=332, filesize=12.0 K 2024-11-07T14:19:19,958 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 4a29aaff371fad1ebbc570e5f0118052 in 911ms, sequenceid=332, compaction requested=true 2024-11-07T14:19:19,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:19,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] 
regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:19,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-11-07T14:19:19,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-11-07T14:19:19,960 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-11-07T14:19:19,960 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3700 sec 2024-11-07T14:19:19,961 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 1.3730 sec 2024-11-07T14:19:20,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:20,391 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4a29aaff371fad1ebbc570e5f0118052 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-07T14:19:20,391 DEBUG [Thread-1506 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3875c8c5 to 127.0.0.1:51818 2024-11-07T14:19:20,391 DEBUG [Thread-1506 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:19:20,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=A 2024-11-07T14:19:20,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:20,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=B 2024-11-07T14:19:20,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:20,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=C 2024-11-07T14:19:20,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:20,392 DEBUG [Thread-1500 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x62f74604 to 127.0.0.1:51818 2024-11-07T14:19:20,392 DEBUG [Thread-1500 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:19:20,397 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411076d4d9be7556b420a9fa1bb13ee8ab24a_4a29aaff371fad1ebbc570e5f0118052 is 50, key is test_row_0/A:col10/1730989159259/Put/seqid=0 2024-11-07T14:19:20,399 DEBUG [Thread-1498 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x75b14fbd to 127.0.0.1:51818 2024-11-07T14:19:20,399 DEBUG [Thread-1498 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:19:20,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742246_1422 (size=12454) 2024-11-07T14:19:20,401 
DEBUG [Thread-1504 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c54a0d3 to 127.0.0.1:51818
2024-11-07T14:19:20,401 DEBUG [Thread-1504 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-07T14:19:20,405 DEBUG [Thread-1502 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x49e13594 to 127.0.0.1:51818
2024-11-07T14:19:20,406 DEBUG [Thread-1502 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-07T14:19:20,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112
2024-11-07T14:19:20,693 INFO [Thread-1508 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed
2024-11-07T14:19:20,693 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers:
2024-11-07T14:19:20,693 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 60
2024-11-07T14:19:20,693 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 50
2024-11-07T14:19:20,693 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 65
2024-11-07T14:19:20,693 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 53
2024-11-07T14:19:20,693 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 55
2024-11-07T14:19:20,693 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-11-07T14:19:20,693 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-11-07T14:19:20,693 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2627
2024-11-07T14:19:20,693 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7881 rows
2024-11-07T14:19:20,693 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2618
2024-11-07T14:19:20,693 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7854 rows
2024-11-07T14:19:20,693 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2617
2024-11-07T14:19:20,693 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7851 rows
2024-11-07T14:19:20,693 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2628
2024-11-07T14:19:20,693 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7884 rows
2024-11-07T14:19:20,693 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2638
2024-11-07T14:19:20,693 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7914 rows
2024-11-07T14:19:20,693 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-11-07T14:19:20,693 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0bf5e2f0 to 127.0.0.1:51818
2024-11-07T14:19:20,693 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-07T14:19:20,696 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-11-07T14:19:20,697 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-11-07T14:19:20,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-11-07T14:19:20,699 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-07T14:19:20,700 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730989160699"}]},"ts":"1730989160699"} 2024-11-07T14:19:20,701 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-07T14:19:20,704 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-07T14:19:20,704 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-07T14:19:20,706 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=4a29aaff371fad1ebbc570e5f0118052, UNASSIGN}] 2024-11-07T14:19:20,706 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=4a29aaff371fad1ebbc570e5f0118052, UNASSIGN 2024-11-07T14:19:20,707 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=4a29aaff371fad1ebbc570e5f0118052, regionState=CLOSING, regionLocation=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:20,708 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-07T14:19:20,708 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; CloseRegionProcedure 4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081}] 2024-11-07T14:19:20,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-07T14:19:20,801 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:20,804 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411076d4d9be7556b420a9fa1bb13ee8ab24a_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411076d4d9be7556b420a9fa1bb13ee8ab24a_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:20,805 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/af01c2636e4c4c1cbc08e9be6a40ee6e, store: [table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:20,806 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/af01c2636e4c4c1cbc08e9be6a40ee6e is 175, key is test_row_0/A:col10/1730989159259/Put/seqid=0 2024-11-07T14:19:20,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742247_1423 (size=31255) 2024-11-07T14:19:20,859 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:20,860 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] handler.UnassignRegionHandler(124): Close 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:20,860 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-07T14:19:20,860 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.HRegion(1681): Closing 4a29aaff371fad1ebbc570e5f0118052, disabling compactions & flushes 2024-11-07T14:19:20,860 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:21,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-07T14:19:21,210 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=355, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/af01c2636e4c4c1cbc08e9be6a40ee6e 2024-11-07T14:19:21,216 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/a7a891eacde0407b8424ea088e020f0e is 50, key is test_row_0/B:col10/1730989159259/Put/seqid=0 2024-11-07T14:19:21,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742248_1424 (size=12301) 2024-11-07T14:19:21,221 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/a7a891eacde0407b8424ea088e020f0e 2024-11-07T14:19:21,226 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/fdc2bfe7a3d34c2982066ecd76bb62f1 is 50, key is test_row_0/C:col10/1730989159259/Put/seqid=0 2024-11-07T14:19:21,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742249_1425 (size=12301) 2024-11-07T14:19:21,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking 
to see if procedure is done pid=114 2024-11-07T14:19:21,630 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/fdc2bfe7a3d34c2982066ecd76bb62f1 2024-11-07T14:19:21,633 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/af01c2636e4c4c1cbc08e9be6a40ee6e as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/af01c2636e4c4c1cbc08e9be6a40ee6e 2024-11-07T14:19:21,637 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/af01c2636e4c4c1cbc08e9be6a40ee6e, entries=150, sequenceid=355, filesize=30.5 K 2024-11-07T14:19:21,637 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/a7a891eacde0407b8424ea088e020f0e as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/a7a891eacde0407b8424ea088e020f0e 2024-11-07T14:19:21,640 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/a7a891eacde0407b8424ea088e020f0e, entries=150, sequenceid=355, filesize=12.0 K 2024-11-07T14:19:21,641 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/fdc2bfe7a3d34c2982066ecd76bb62f1 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/fdc2bfe7a3d34c2982066ecd76bb62f1 2024-11-07T14:19:21,643 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/fdc2bfe7a3d34c2982066ecd76bb62f1, entries=150, sequenceid=355, filesize=12.0 K 2024-11-07T14:19:21,644 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=26.84 KB/27480 for 4a29aaff371fad1ebbc570e5f0118052 in 1254ms, sequenceid=355, compaction requested=true 2024-11-07T14:19:21,644 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:21,644 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
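Note on the RegionTooBusyException entries above: HRegion.checkResources rejects incoming Mutate calls while the region's memstore sits above its blocking size (reported here as 512.0 K for region 4a29aaff371fad1ebbc570e5f0118052), and the rejections stop once the flush procedure (pid=113) commits its store files. The blocking size is normally hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier, and this test appears to set it very low to provoke the condition. The stock HBase client retries these failures on its own (so they may also surface wrapped in a RetriesExhaustedException); the sketch below is only a hypothetical illustration of handling the signal explicitly from application code, not part of AcidGuaranteesTestTool, with the table, row, and column names borrowed from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                // Retry with backoff when the region rejects the write because its
                // memstore is over the blocking limit, as in the log above.
                for (int attempt = 1; ; attempt++) {
                    try {
                        table.put(put);
                        break; // write accepted
                    } catch (RegionTooBusyException busy) {
                        // Only reaches application code if the client's own retry
                        // loop is configured not to absorb it (an assumption here).
                        if (attempt >= 10) {
                            throw busy; // give up after a bounded number of attempts
                        }
                        Thread.sleep(100L * attempt); // back off while the flush catches up
                    }
                }
            }
        }
    }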
2024-11-07T14:19:21,644 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:21,644 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. after waiting 0 ms 2024-11-07T14:19:21,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4a29aaff371fad1ebbc570e5f0118052:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:19:21,644 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:21,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:21,644 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. because compaction request was cancelled 2024-11-07T14:19:21,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4a29aaff371fad1ebbc570e5f0118052:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:19:21,645 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.HRegion(2837): Flushing 4a29aaff371fad1ebbc570e5f0118052 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-07T14:19:21,645 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4a29aaff371fad1ebbc570e5f0118052:A 2024-11-07T14:19:21,645 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:21,645 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. because compaction request was cancelled 2024-11-07T14:19:21,645 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4a29aaff371fad1ebbc570e5f0118052:C, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:19:21,645 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4a29aaff371fad1ebbc570e5f0118052:B 2024-11-07T14:19:21,645 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 
because compaction request was cancelled 2024-11-07T14:19:21,645 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:21,645 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4a29aaff371fad1ebbc570e5f0118052:C 2024-11-07T14:19:21,645 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=A 2024-11-07T14:19:21,645 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:21,645 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=B 2024-11-07T14:19:21,645 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:21,645 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4a29aaff371fad1ebbc570e5f0118052, store=C 2024-11-07T14:19:21,645 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:21,649 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110701e70b92064148faac0e59c7cc377305_4a29aaff371fad1ebbc570e5f0118052 is 50, key is test_row_0/A:col10/1730989160404/Put/seqid=0 2024-11-07T14:19:21,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742250_1426 (size=9914) 2024-11-07T14:19:21,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-07T14:19:22,053 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:22,056 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110701e70b92064148faac0e59c7cc377305_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110701e70b92064148faac0e59c7cc377305_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:22,057 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/cf8c956946b446689013e3c6b65aade2, store: [table=TestAcidGuarantees family=A region=4a29aaff371fad1ebbc570e5f0118052] 2024-11-07T14:19:22,058 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/cf8c956946b446689013e3c6b65aade2 is 175, key is test_row_0/A:col10/1730989160404/Put/seqid=0 2024-11-07T14:19:22,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742251_1427 (size=22561) 2024-11-07T14:19:22,404 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-07T14:19:22,461 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=362, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/cf8c956946b446689013e3c6b65aade2 2024-11-07T14:19:22,467 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/97fadc7ec3f4457ba82c2c53d366b1ba is 50, key is test_row_0/B:col10/1730989160404/Put/seqid=0 2024-11-07T14:19:22,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742252_1428 (size=9857) 2024-11-07T14:19:22,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-07T14:19:22,870 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=362 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/97fadc7ec3f4457ba82c2c53d366b1ba 2024-11-07T14:19:22,875 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/e80bd8195aa24d39ac2b404bd6487c31 is 50, key is test_row_0/C:col10/1730989160404/Put/seqid=0 2024-11-07T14:19:22,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742253_1429 (size=9857) 2024-11-07T14:19:23,279 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data 
size=8.95 KB at sequenceid=362 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/e80bd8195aa24d39ac2b404bd6487c31 2024-11-07T14:19:23,282 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/A/cf8c956946b446689013e3c6b65aade2 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/cf8c956946b446689013e3c6b65aade2 2024-11-07T14:19:23,285 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/cf8c956946b446689013e3c6b65aade2, entries=100, sequenceid=362, filesize=22.0 K 2024-11-07T14:19:23,286 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/B/97fadc7ec3f4457ba82c2c53d366b1ba as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/97fadc7ec3f4457ba82c2c53d366b1ba 2024-11-07T14:19:23,289 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/97fadc7ec3f4457ba82c2c53d366b1ba, entries=100, sequenceid=362, filesize=9.6 K 2024-11-07T14:19:23,289 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/.tmp/C/e80bd8195aa24d39ac2b404bd6487c31 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/e80bd8195aa24d39ac2b404bd6487c31 2024-11-07T14:19:23,292 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/e80bd8195aa24d39ac2b404bd6487c31, entries=100, sequenceid=362, filesize=9.6 K 2024-11-07T14:19:23,292 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 4a29aaff371fad1ebbc570e5f0118052 in 1648ms, sequenceid=362, compaction requested=true 2024-11-07T14:19:23,293 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/a52768ffc8084becb26a265852104203, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/542f2f27d7754f23b111df5d04228534, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/a5dbc32c0ab14f4c835e400dbbaea3b0, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/8f2e9bc10990460b89fbec85338f9f8d, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/5b732ee92b6a42ec97256f3165314c7c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/d19d7ab7589e4348a4400a863d656c9b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/ee1fedc2753147678401a4024cf7161c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/a793f5d379c64737b6d30fece7c53154, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/59a9aec5ddba4a0fa561268ac92b2b66, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/d82b532ffa53417bb161d2078d782ee6, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/37169fb9a5594276accd9251b454d09e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/6e2dd88832ae493693edf333388f386e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/fa5c2e9e39974106a2df5bb8b703f679, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/f8dcdaf609c3478ab4d1a441101bc481, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/7c2580f72e8d408f94ee5d7e91f94991, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/bc34a71dba3d4b3bb3d59c819ed1d099, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/e6031128ae354b12b286d024ff2403ee, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/f2d6faf86c44408cba3ff1b959354588, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/1632e9b54cd94860ba2d23318b72cdc4, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/abad5cce5e344d5194b282abc563aadf] to archive 2024-11-07T14:19:23,294 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-07T14:19:23,295 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/a52768ffc8084becb26a265852104203 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/a52768ffc8084becb26a265852104203 2024-11-07T14:19:23,296 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/542f2f27d7754f23b111df5d04228534 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/542f2f27d7754f23b111df5d04228534 2024-11-07T14:19:23,297 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/a5dbc32c0ab14f4c835e400dbbaea3b0 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/a5dbc32c0ab14f4c835e400dbbaea3b0 2024-11-07T14:19:23,298 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/8f2e9bc10990460b89fbec85338f9f8d to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/8f2e9bc10990460b89fbec85338f9f8d 2024-11-07T14:19:23,298 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/5b732ee92b6a42ec97256f3165314c7c to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/5b732ee92b6a42ec97256f3165314c7c 2024-11-07T14:19:23,299 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/d19d7ab7589e4348a4400a863d656c9b to 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/d19d7ab7589e4348a4400a863d656c9b 2024-11-07T14:19:23,300 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/ee1fedc2753147678401a4024cf7161c to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/ee1fedc2753147678401a4024cf7161c 2024-11-07T14:19:23,301 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/a793f5d379c64737b6d30fece7c53154 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/a793f5d379c64737b6d30fece7c53154 2024-11-07T14:19:23,302 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/59a9aec5ddba4a0fa561268ac92b2b66 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/59a9aec5ddba4a0fa561268ac92b2b66 2024-11-07T14:19:23,303 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/d82b532ffa53417bb161d2078d782ee6 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/d82b532ffa53417bb161d2078d782ee6 2024-11-07T14:19:23,304 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/37169fb9a5594276accd9251b454d09e to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/37169fb9a5594276accd9251b454d09e 2024-11-07T14:19:23,304 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/6e2dd88832ae493693edf333388f386e to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/6e2dd88832ae493693edf333388f386e 2024-11-07T14:19:23,305 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/fa5c2e9e39974106a2df5bb8b703f679 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/fa5c2e9e39974106a2df5bb8b703f679 2024-11-07T14:19:23,306 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/f8dcdaf609c3478ab4d1a441101bc481 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/f8dcdaf609c3478ab4d1a441101bc481 2024-11-07T14:19:23,307 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/7c2580f72e8d408f94ee5d7e91f94991 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/7c2580f72e8d408f94ee5d7e91f94991 2024-11-07T14:19:23,307 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/bc34a71dba3d4b3bb3d59c819ed1d099 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/bc34a71dba3d4b3bb3d59c819ed1d099 2024-11-07T14:19:23,308 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/e6031128ae354b12b286d024ff2403ee to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/e6031128ae354b12b286d024ff2403ee 2024-11-07T14:19:23,309 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/f2d6faf86c44408cba3ff1b959354588 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/f2d6faf86c44408cba3ff1b959354588 2024-11-07T14:19:23,310 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/1632e9b54cd94860ba2d23318b72cdc4 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/1632e9b54cd94860ba2d23318b72cdc4 2024-11-07T14:19:23,311 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/abad5cce5e344d5194b282abc563aadf to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/abad5cce5e344d5194b282abc563aadf 2024-11-07T14:19:23,312 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/8b82dcb1ca4547ef982e357b0f2aeab4, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/06b712a068494e33b769eacc7df3bb4e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/857cf722086b4dd1be207099dfed8618, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/229408cb3d804e25bfc393a594229423, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/87366765bfbc4644b6b66607c52a6213, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/d4d89fe6fba246aeaf9836561171e40a, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/46e55205e667445ebf2bafaf8f676696, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/e2669744124644b888b17a59671662ec, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/1bb0285d39054af8976251e9ee03e277, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/65595487756c4f1fa50ba21364d25010, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/e070fb2a6c1d4003ae86144fc9674764, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/bef06d500e284b509142b8b6cd1e3440, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/8c10876db3f242fb88d64d16686f16d1, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/638945916f2045a59ed95881e80d1e84, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/3f6726a08f2b4f2588414c60ce4c6541, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/b4c5eaed581a40bcbb723f7b3a4d44c5, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/295d194b2af3439f84aa86c4b0ba0635, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/fde68338c7ed44c8b00e3df4f93a8b04, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/581112e436dc4dd7be1f191c7b7df145, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/6945e89954634d2a878ff80858708dc4] to archive 2024-11-07T14:19:23,313 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-07T14:19:23,314 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/8b82dcb1ca4547ef982e357b0f2aeab4 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/8b82dcb1ca4547ef982e357b0f2aeab4 2024-11-07T14:19:23,315 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/06b712a068494e33b769eacc7df3bb4e to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/06b712a068494e33b769eacc7df3bb4e 2024-11-07T14:19:23,316 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/857cf722086b4dd1be207099dfed8618 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/857cf722086b4dd1be207099dfed8618 2024-11-07T14:19:23,317 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/229408cb3d804e25bfc393a594229423 to 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/229408cb3d804e25bfc393a594229423 2024-11-07T14:19:23,318 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/87366765bfbc4644b6b66607c52a6213 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/87366765bfbc4644b6b66607c52a6213 2024-11-07T14:19:23,318 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/d4d89fe6fba246aeaf9836561171e40a to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/d4d89fe6fba246aeaf9836561171e40a 2024-11-07T14:19:23,319 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/46e55205e667445ebf2bafaf8f676696 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/46e55205e667445ebf2bafaf8f676696 2024-11-07T14:19:23,320 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/e2669744124644b888b17a59671662ec to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/e2669744124644b888b17a59671662ec 2024-11-07T14:19:23,321 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/1bb0285d39054af8976251e9ee03e277 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/1bb0285d39054af8976251e9ee03e277 2024-11-07T14:19:23,322 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/65595487756c4f1fa50ba21364d25010 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/65595487756c4f1fa50ba21364d25010 2024-11-07T14:19:23,323 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/e070fb2a6c1d4003ae86144fc9674764 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/e070fb2a6c1d4003ae86144fc9674764 2024-11-07T14:19:23,323 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/bef06d500e284b509142b8b6cd1e3440 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/bef06d500e284b509142b8b6cd1e3440 2024-11-07T14:19:23,324 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/8c10876db3f242fb88d64d16686f16d1 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/8c10876db3f242fb88d64d16686f16d1 2024-11-07T14:19:23,325 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/638945916f2045a59ed95881e80d1e84 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/638945916f2045a59ed95881e80d1e84 2024-11-07T14:19:23,326 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/3f6726a08f2b4f2588414c60ce4c6541 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/3f6726a08f2b4f2588414c60ce4c6541 2024-11-07T14:19:23,327 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/b4c5eaed581a40bcbb723f7b3a4d44c5 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/b4c5eaed581a40bcbb723f7b3a4d44c5 2024-11-07T14:19:23,328 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/295d194b2af3439f84aa86c4b0ba0635 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/295d194b2af3439f84aa86c4b0ba0635 2024-11-07T14:19:23,328 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/fde68338c7ed44c8b00e3df4f93a8b04 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/fde68338c7ed44c8b00e3df4f93a8b04 2024-11-07T14:19:23,329 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/581112e436dc4dd7be1f191c7b7df145 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/581112e436dc4dd7be1f191c7b7df145 2024-11-07T14:19:23,330 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/6945e89954634d2a878ff80858708dc4 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/6945e89954634d2a878ff80858708dc4 2024-11-07T14:19:23,331 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/490b16d49de44e13b7d2ff9244bf9f18, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/0fd248cc325d41e5b6391ba8aac39b9e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/f161e62e5f6e46efbdf6b11c07b755e9, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/794715e5800d4ae7a0971bff45c75bfa, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/4ad85c0514644bd28f4f1f988e681369, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/ac6a992a3cc04a6b9c5a1bbcfc284607, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/0e25b5c6a6cd4e6a903912cfe2067396, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/cca6c99e67eb482eb0e5a070c51310bf, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/f79f6667a57f46bcb8761baac6fb335b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/ee6394027cbb45e28f6d25fe4153437e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/750039bc94184818bb8a4604576b3473, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/a3dd20bf8ac745b79a913e4f815bf882, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/267ce9293c0c4775a61fb4f06be88762, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/10b6a1b700a046d395db9ec48b869b54, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/bd154a7052ad45cc8ce20129f540db16, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/98d4bc8d0547457c80dabbe05d2a93d1, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/38fff3b8020940f7999d27b06da266ad, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/1feb6688993948ce88d9b9c5673974f8, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/4f727593f92441e6b174bcb861f631ff, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/39c97f238e64479cb26136fce3858d32] to archive 2024-11-07T14:19:23,332 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-07T14:19:23,333 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/490b16d49de44e13b7d2ff9244bf9f18 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/490b16d49de44e13b7d2ff9244bf9f18 2024-11-07T14:19:23,333 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/0fd248cc325d41e5b6391ba8aac39b9e to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/0fd248cc325d41e5b6391ba8aac39b9e 2024-11-07T14:19:23,334 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/f161e62e5f6e46efbdf6b11c07b755e9 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/f161e62e5f6e46efbdf6b11c07b755e9 2024-11-07T14:19:23,335 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/794715e5800d4ae7a0971bff45c75bfa to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/794715e5800d4ae7a0971bff45c75bfa 2024-11-07T14:19:23,336 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/4ad85c0514644bd28f4f1f988e681369 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/4ad85c0514644bd28f4f1f988e681369 2024-11-07T14:19:23,337 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/ac6a992a3cc04a6b9c5a1bbcfc284607 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/ac6a992a3cc04a6b9c5a1bbcfc284607 2024-11-07T14:19:23,337 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/0e25b5c6a6cd4e6a903912cfe2067396 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/0e25b5c6a6cd4e6a903912cfe2067396 2024-11-07T14:19:23,338 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/cca6c99e67eb482eb0e5a070c51310bf to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/cca6c99e67eb482eb0e5a070c51310bf 2024-11-07T14:19:23,339 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/f79f6667a57f46bcb8761baac6fb335b to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/f79f6667a57f46bcb8761baac6fb335b 2024-11-07T14:19:23,340 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/ee6394027cbb45e28f6d25fe4153437e to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/ee6394027cbb45e28f6d25fe4153437e 2024-11-07T14:19:23,340 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/750039bc94184818bb8a4604576b3473 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/750039bc94184818bb8a4604576b3473 2024-11-07T14:19:23,341 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/a3dd20bf8ac745b79a913e4f815bf882 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/a3dd20bf8ac745b79a913e4f815bf882 2024-11-07T14:19:23,342 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/267ce9293c0c4775a61fb4f06be88762 to 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/267ce9293c0c4775a61fb4f06be88762 2024-11-07T14:19:23,343 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/10b6a1b700a046d395db9ec48b869b54 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/10b6a1b700a046d395db9ec48b869b54 2024-11-07T14:19:23,344 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/bd154a7052ad45cc8ce20129f540db16 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/bd154a7052ad45cc8ce20129f540db16 2024-11-07T14:19:23,345 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/98d4bc8d0547457c80dabbe05d2a93d1 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/98d4bc8d0547457c80dabbe05d2a93d1 2024-11-07T14:19:23,346 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/38fff3b8020940f7999d27b06da266ad to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/38fff3b8020940f7999d27b06da266ad 2024-11-07T14:19:23,346 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/1feb6688993948ce88d9b9c5673974f8 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/1feb6688993948ce88d9b9c5673974f8 2024-11-07T14:19:23,347 DEBUG [StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/4f727593f92441e6b174bcb861f631ff to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/4f727593f92441e6b174bcb861f631ff 2024-11-07T14:19:23,348 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/39c97f238e64479cb26136fce3858d32 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/39c97f238e64479cb26136fce3858d32 2024-11-07T14:19:23,351 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/recovered.edits/365.seqid, newMaxSeqId=365, maxSeqId=4 2024-11-07T14:19:23,352 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052. 2024-11-07T14:19:23,352 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.HRegion(1635): Region close journal for 4a29aaff371fad1ebbc570e5f0118052: 2024-11-07T14:19:23,353 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] handler.UnassignRegionHandler(170): Closed 4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:23,353 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=4a29aaff371fad1ebbc570e5f0118052, regionState=CLOSED 2024-11-07T14:19:23,355 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-11-07T14:19:23,355 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; CloseRegionProcedure 4a29aaff371fad1ebbc570e5f0118052, server=69430dbfd73f,45917,1730989044081 in 2.6460 sec 2024-11-07T14:19:23,356 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=116, resume processing ppid=115 2024-11-07T14:19:23,356 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=4a29aaff371fad1ebbc570e5f0118052, UNASSIGN in 2.6490 sec 2024-11-07T14:19:23,357 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-11-07T14:19:23,357 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.6520 sec 2024-11-07T14:19:23,358 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730989163358"}]},"ts":"1730989163358"} 2024-11-07T14:19:23,358 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-07T14:19:23,360 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-07T14:19:23,361 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.6640 sec 2024-11-07T14:19:23,828 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the 
MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-07T14:19:24,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-07T14:19:24,804 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-11-07T14:19:24,805 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-07T14:19:24,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:19:24,806 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=118, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:19:24,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-07T14:19:24,807 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=118, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:19:24,809 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:24,811 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A, FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B, FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C, FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/recovered.edits] 2024-11-07T14:19:24,814 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/1704f37b87ed4e6994793be6e066e503 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/1704f37b87ed4e6994793be6e066e503 2024-11-07T14:19:24,815 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/3917cf68e2264c3a892d0487237f7eff to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/3917cf68e2264c3a892d0487237f7eff 2024-11-07T14:19:24,817 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/af01c2636e4c4c1cbc08e9be6a40ee6e to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/af01c2636e4c4c1cbc08e9be6a40ee6e 2024-11-07T14:19:24,818 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/cf8c956946b446689013e3c6b65aade2 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/cf8c956946b446689013e3c6b65aade2 2024-11-07T14:19:24,819 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/d6a28e1c058a45dbaf63b9bdb6edd8b5 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/A/d6a28e1c058a45dbaf63b9bdb6edd8b5 2024-11-07T14:19:24,821 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/3e58e2a8f36e47018653614781e89b54 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/3e58e2a8f36e47018653614781e89b54 2024-11-07T14:19:24,822 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/474c9cc084c74d84a9cc036f09f340e1 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/474c9cc084c74d84a9cc036f09f340e1 2024-11-07T14:19:24,823 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/97fadc7ec3f4457ba82c2c53d366b1ba to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/97fadc7ec3f4457ba82c2c53d366b1ba 2024-11-07T14:19:24,825 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/a7a891eacde0407b8424ea088e020f0e to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/a7a891eacde0407b8424ea088e020f0e 2024-11-07T14:19:24,826 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/b5bc772817504901963a738ec374c8f0 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/B/b5bc772817504901963a738ec374c8f0 2024-11-07T14:19:24,828 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/89184b94e1e74339ab7b3b1fa43c5383 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/89184b94e1e74339ab7b3b1fa43c5383 2024-11-07T14:19:24,830 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/d5b72b4b6bfc44d2a1f2b5806eafdfb7 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/d5b72b4b6bfc44d2a1f2b5806eafdfb7 2024-11-07T14:19:24,831 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/e2a196f3b61c46e885ba0b0cae27755b to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/e2a196f3b61c46e885ba0b0cae27755b 2024-11-07T14:19:24,832 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/e80bd8195aa24d39ac2b404bd6487c31 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/e80bd8195aa24d39ac2b404bd6487c31 2024-11-07T14:19:24,833 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/fdc2bfe7a3d34c2982066ecd76bb62f1 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/C/fdc2bfe7a3d34c2982066ecd76bb62f1 2024-11-07T14:19:24,835 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/recovered.edits/365.seqid to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052/recovered.edits/365.seqid 2024-11-07T14:19:24,835 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:24,836 DEBUG [PEWorker-3 {}] 
procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-07T14:19:24,836 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-07T14:19:24,837 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-07T14:19:24,840 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110701e70b92064148faac0e59c7cc377305_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110701e70b92064148faac0e59c7cc377305_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:24,841 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107090f85176400494abe2908b9b62bf014_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107090f85176400494abe2908b9b62bf014_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:24,842 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107091f65e9fc3b43b28f78fc400382cc65_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107091f65e9fc3b43b28f78fc400382cc65_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:24,843 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107225e3d6c632f463baf52de0c9bd3d506_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107225e3d6c632f463baf52de0c9bd3d506_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:24,845 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411073a9bf13d465c412da4c446cfb92b6725_4a29aaff371fad1ebbc570e5f0118052 to 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411073a9bf13d465c412da4c446cfb92b6725_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:24,846 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110750a7dee253544d92927571d215c733fa_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110750a7dee253544d92927571d215c733fa_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:24,848 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107517b8d014d714a229e43e2f3e03b359c_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107517b8d014d714a229e43e2f3e03b359c_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:24,849 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107520bd695a55942459e4bc8976e05f3fd_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107520bd695a55942459e4bc8976e05f3fd_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:24,851 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411076069d012c19e4008bca684564e5a592b_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411076069d012c19e4008bca684564e5a592b_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:24,852 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107607c0286bd334b3ca34f3b37fd42c194_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107607c0286bd334b3ca34f3b37fd42c194_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:24,853 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411076d4d9be7556b420a9fa1bb13ee8ab24a_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411076d4d9be7556b420a9fa1bb13ee8ab24a_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:24,854 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411078e52e20ee26b458b967a76ab785296f4_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411078e52e20ee26b458b967a76ab785296f4_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:24,855 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411079326e8be43f74cf8a7114ed11da98ff8_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411079326e8be43f74cf8a7114ed11da98ff8_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:24,856 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107b2872471091d46a1bf91b6a6c30242ff_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107b2872471091d46a1bf91b6a6c30242ff_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:24,857 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107ba616d51ed0548dbb5ddbedaea3c9ce8_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107ba616d51ed0548dbb5ddbedaea3c9ce8_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:24,858 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107bb882038d32142bf948fff64cfcf996f_4a29aaff371fad1ebbc570e5f0118052 to 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107bb882038d32142bf948fff64cfcf996f_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:24,859 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107c3a9c1f31f7e46ea8d34446b0e74c4ed_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107c3a9c1f31f7e46ea8d34446b0e74c4ed_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:24,860 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107c4932847ef1344a796bef2f624fefcd9_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107c4932847ef1344a796bef2f624fefcd9_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:24,861 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107cbc467ff97d34b8ca7dfdda3671cd655_4a29aaff371fad1ebbc570e5f0118052 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107cbc467ff97d34b8ca7dfdda3671cd655_4a29aaff371fad1ebbc570e5f0118052 2024-11-07T14:19:24,862 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-07T14:19:24,863 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=118, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:19:24,865 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-07T14:19:24,867 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-07T14:19:24,868 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=118, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:19:24,868 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
2024-11-07T14:19:24,868 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1730989164868"}]},"ts":"9223372036854775807"} 2024-11-07T14:19:24,869 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-07T14:19:24,869 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 4a29aaff371fad1ebbc570e5f0118052, NAME => 'TestAcidGuarantees,,1730989136874.4a29aaff371fad1ebbc570e5f0118052.', STARTKEY => '', ENDKEY => ''}] 2024-11-07T14:19:24,869 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-07T14:19:24,870 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1730989164869"}]},"ts":"9223372036854775807"} 2024-11-07T14:19:24,871 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-07T14:19:24,873 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=118, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:19:24,873 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 68 msec 2024-11-07T14:19:24,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-07T14:19:24,908 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-11-07T14:19:24,918 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobScanAtomicity Thread=240 (was 236) - Thread LEAK? -, OpenFileDescriptor=461 (was 445) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=412 (was 364) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=5718 (was 5831) 2024-11-07T14:19:24,927 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testScanAtomicity Thread=240, OpenFileDescriptor=461, MaxFileDescriptor=1048576, SystemLoadAverage=412, ProcessCount=11, AvailableMemoryMB=5718 2024-11-07T14:19:24,928 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-07T14:19:24,928 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T14:19:24,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=119, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-07T14:19:24,930 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=119, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-07T14:19:24,930 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:24,930 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 119 2024-11-07T14:19:24,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=119 2024-11-07T14:19:24,930 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=119, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-07T14:19:24,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742254_1430 (size=960) 2024-11-07T14:19:25,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=119 2024-11-07T14:19:25,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=119 2024-11-07T14:19:25,337 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8 2024-11-07T14:19:25,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742255_1431 (size=53) 2024-11-07T14:19:25,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=119 2024-11-07T14:19:25,742 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T14:19:25,743 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing e3403f2020527e03e67f0f2ab02983ef, disabling compactions & flushes 2024-11-07T14:19:25,743 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:25,743 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:25,743 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. after waiting 0 ms 2024-11-07T14:19:25,743 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:25,743 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
2024-11-07T14:19:25,743 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:25,744 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=119, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-07T14:19:25,744 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1730989165744"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730989165744"}]},"ts":"1730989165744"} 2024-11-07T14:19:25,745 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-07T14:19:25,746 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=119, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-07T14:19:25,746 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730989165746"}]},"ts":"1730989165746"} 2024-11-07T14:19:25,747 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-07T14:19:25,751 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=120, ppid=119, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e3403f2020527e03e67f0f2ab02983ef, ASSIGN}] 2024-11-07T14:19:25,752 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=120, ppid=119, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e3403f2020527e03e67f0f2ab02983ef, ASSIGN 2024-11-07T14:19:25,752 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=120, ppid=119, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=e3403f2020527e03e67f0f2ab02983ef, ASSIGN; state=OFFLINE, location=69430dbfd73f,45917,1730989044081; forceNewPlan=false, retain=false 2024-11-07T14:19:25,903 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=e3403f2020527e03e67f0f2ab02983ef, regionState=OPENING, regionLocation=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:25,904 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; OpenRegionProcedure e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081}] 2024-11-07T14:19:26,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=119 2024-11-07T14:19:26,056 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:26,058 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
2024-11-07T14:19:26,058 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7285): Opening region: {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} 2024-11-07T14:19:26,059 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees e3403f2020527e03e67f0f2ab02983ef 2024-11-07T14:19:26,059 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T14:19:26,059 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7327): checking encryption for e3403f2020527e03e67f0f2ab02983ef 2024-11-07T14:19:26,059 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7330): checking classloading for e3403f2020527e03e67f0f2ab02983ef 2024-11-07T14:19:26,060 INFO [StoreOpener-e3403f2020527e03e67f0f2ab02983ef-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region e3403f2020527e03e67f0f2ab02983ef 2024-11-07T14:19:26,061 INFO [StoreOpener-e3403f2020527e03e67f0f2ab02983ef-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T14:19:26,061 INFO [StoreOpener-e3403f2020527e03e67f0f2ab02983ef-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e3403f2020527e03e67f0f2ab02983ef columnFamilyName A 2024-11-07T14:19:26,061 DEBUG [StoreOpener-e3403f2020527e03e67f0f2ab02983ef-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:26,062 INFO [StoreOpener-e3403f2020527e03e67f0f2ab02983ef-1 {}] regionserver.HStore(327): Store=e3403f2020527e03e67f0f2ab02983ef/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T14:19:26,062 INFO [StoreOpener-e3403f2020527e03e67f0f2ab02983ef-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region e3403f2020527e03e67f0f2ab02983ef 2024-11-07T14:19:26,063 INFO [StoreOpener-e3403f2020527e03e67f0f2ab02983ef-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T14:19:26,063 INFO [StoreOpener-e3403f2020527e03e67f0f2ab02983ef-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e3403f2020527e03e67f0f2ab02983ef columnFamilyName B 2024-11-07T14:19:26,063 DEBUG [StoreOpener-e3403f2020527e03e67f0f2ab02983ef-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:26,063 INFO [StoreOpener-e3403f2020527e03e67f0f2ab02983ef-1 {}] regionserver.HStore(327): Store=e3403f2020527e03e67f0f2ab02983ef/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T14:19:26,063 INFO [StoreOpener-e3403f2020527e03e67f0f2ab02983ef-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region e3403f2020527e03e67f0f2ab02983ef 2024-11-07T14:19:26,065 INFO [StoreOpener-e3403f2020527e03e67f0f2ab02983ef-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T14:19:26,065 INFO [StoreOpener-e3403f2020527e03e67f0f2ab02983ef-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e3403f2020527e03e67f0f2ab02983ef columnFamilyName C 2024-11-07T14:19:26,065 DEBUG [StoreOpener-e3403f2020527e03e67f0f2ab02983ef-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:26,065 INFO [StoreOpener-e3403f2020527e03e67f0f2ab02983ef-1 {}] regionserver.HStore(327): Store=e3403f2020527e03e67f0f2ab02983ef/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T14:19:26,065 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:26,066 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef 2024-11-07T14:19:26,066 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef 2024-11-07T14:19:26,067 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T14:19:26,068 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1085): writing seq id for e3403f2020527e03e67f0f2ab02983ef 2024-11-07T14:19:26,070 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T14:19:26,070 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1102): Opened e3403f2020527e03e67f0f2ab02983ef; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61161399, jitterRate=-0.08862413465976715}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T14:19:26,071 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1001): Region open journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:26,071 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef., pid=121, masterSystemTime=1730989166055 2024-11-07T14:19:26,073 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:26,073 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
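The entries above trace CreateTableProcedure pid=119 writing the FS layout for 'TestAcidGuarantees' (column families A, B and C, VERSIONS => '1', BLOOMFILTER => 'ROW', 64KB blocks, BASIC CompactingMemStore) and the region e3403f2020527e03e67f0f2ab02983ef being opened on 69430dbfd73f,45917. As a hedged illustration only, and not part of the test harness captured in this log, a table with the same attributes could be created through the HBase 2.x client API roughly as follows; the connection configuration is assumed to come from hbase-site.xml on the classpath.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAcidTableSketch {
    public static void main(String[] args) throws Exception {
        // Cluster coordinates come from hbase-site.xml (assumption for this sketch).
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
            TableDescriptorBuilder tdb = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // Matches the TABLE_ATTRIBUTES metadata shown in the create log entry.
                .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
            for (String cf : new String[] {"A", "B", "C"}) {
                tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes(cf))
                    .setMaxVersions(1)                 // VERSIONS => '1'
                    .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
                    .setBlocksize(64 * 1024)           // BLOCKSIZE => 64KB
                    .build());
            }
            admin.createTable(tdb.build());
        }
    }
}
```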
2024-11-07T14:19:26,073 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=e3403f2020527e03e67f0f2ab02983ef, regionState=OPEN, openSeqNum=2, regionLocation=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:26,075 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-11-07T14:19:26,075 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; OpenRegionProcedure e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 in 170 msec 2024-11-07T14:19:26,076 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=120, resume processing ppid=119 2024-11-07T14:19:26,076 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, ppid=119, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=e3403f2020527e03e67f0f2ab02983ef, ASSIGN in 324 msec 2024-11-07T14:19:26,077 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=119, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-07T14:19:26,077 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730989166077"}]},"ts":"1730989166077"} 2024-11-07T14:19:26,078 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-07T14:19:26,080 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=119, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-07T14:19:26,081 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1520 sec 2024-11-07T14:19:26,324 DEBUG [regionserver/69430dbfd73f:0.Chore.1 {}] throttle.PressureAwareCompactionThroughputController(103): CompactionPressure is 0.0, tune throughput to 50.00 MB/second 2024-11-07T14:19:26,882 ERROR [LeaseRenewer:jenkins@localhost:34807 {}] server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins@localhost:34807,5,PEWorkerGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null at org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:27,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=119 2024-11-07T14:19:27,034 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 119 completed 2024-11-07T14:19:27,035 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x79b10416 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7177efc9 2024-11-07T14:19:27,039 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65df2359, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:19:27,040 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:19:27,041 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38006, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:19:27,042 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-07T14:19:27,043 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53240, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-07T14:19:27,045 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2f142b04 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@61d38088 2024-11-07T14:19:27,048 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d0ab200, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:19:27,049 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0de9f076 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7043f683 2024-11-07T14:19:27,052 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5871c039, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:19:27,053 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4414259d to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2b0c2472 2024-11-07T14:19:27,055 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7daa5922, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:19:27,056 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7ed69825 to 
127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@34b30c39 2024-11-07T14:19:27,059 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b7f20c4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:19:27,060 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x11193a0c to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d672ed2 2024-11-07T14:19:27,063 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f7c40ba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:19:27,064 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7cf40102 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@41b0e7b6 2024-11-07T14:19:27,071 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6050584c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:19:27,071 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x496fe03f to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@f2423f3 2024-11-07T14:19:27,077 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6dd48863, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:19:27,078 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3652e74d to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@184771cf 2024-11-07T14:19:27,082 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51196534, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:19:27,082 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2405c04e to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@76f0408 2024-11-07T14:19:27,085 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1dc5e114, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:19:27,086 DEBUG [Time-limited test {}] 
zookeeper.ReadOnlyZKClient(149): Connect 0x73d92042 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@c692575 2024-11-07T14:19:27,090 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e96b8ad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:19:27,096 DEBUG [hconnection-0x78e8bbd8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:19:27,097 DEBUG [hconnection-0x4eaf9b75-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:19:27,098 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38010, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:19:27,098 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38008, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:19:27,099 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:19:27,099 DEBUG [hconnection-0x55072eb0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:19:27,100 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38024, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:19:27,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees 2024-11-07T14:19:27,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-07T14:19:27,104 DEBUG [hconnection-0x195327bc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:19:27,105 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:19:27,105 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38032, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:19:27,106 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:19:27,106 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:19:27,108 DEBUG [hconnection-0x62d64946-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientService, sasl=false 2024-11-07T14:19:27,109 DEBUG [hconnection-0x369c5f15-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:19:27,109 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38046, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:19:27,110 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38060, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:19:27,110 DEBUG [hconnection-0x538a3640-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:19:27,111 DEBUG [hconnection-0x75953404-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:19:27,111 DEBUG [hconnection-0x2f44fa98-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:19:27,111 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38062, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:19:27,112 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38078, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:19:27,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on e3403f2020527e03e67f0f2ab02983ef 2024-11-07T14:19:27,112 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38086, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:19:27,112 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e3403f2020527e03e67f0f2ab02983ef 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-07T14:19:27,113 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=A 2024-11-07T14:19:27,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:27,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=B 2024-11-07T14:19:27,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:27,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=C 2024-11-07T14:19:27,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:27,123 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:27,123 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:27,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989227123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:27,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989227123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:27,123 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:27,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989227123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:27,124 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:27,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989227124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:27,125 DEBUG [hconnection-0x4f11267e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:19:27,126 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38098, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:19:27,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:27,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989227127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:27,156 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/644c2342b4b84ddb81aaa9ffda6c4d4b is 50, key is test_row_0/A:col10/1730989167109/Put/seqid=0 2024-11-07T14:19:27,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742256_1432 (size=14341) 2024-11-07T14:19:27,173 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/644c2342b4b84ddb81aaa9ffda6c4d4b 2024-11-07T14:19:27,200 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/67b2d1ce91914642bf7a2e7255b3731b is 50, key is test_row_0/B:col10/1730989167109/Put/seqid=0 2024-11-07T14:19:27,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-07T14:19:27,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742257_1433 (size=12001) 2024-11-07T14:19:27,226 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:27,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989227224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:27,227 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:27,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989227224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:27,227 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:27,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989227225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:27,227 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:27,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989227226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:27,231 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:27,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989227228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:27,257 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:27,258 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-07T14:19:27,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:27,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:27,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:27,258 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:27,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:27,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:27,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-07T14:19:27,410 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:27,411 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-07T14:19:27,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:27,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:27,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:27,411 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:27,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:27,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:27,429 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:27,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989227428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:27,429 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:27,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989227428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:27,431 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:27,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989227428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:27,432 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:27,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989227430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:27,437 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:27,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989227433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:27,563 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:27,564 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-07T14:19:27,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:27,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:27,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:27,564 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:27,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:27,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:27,604 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/67b2d1ce91914642bf7a2e7255b3731b 2024-11-07T14:19:27,632 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/ad58e06505464133b59e9f2ada400bf9 is 50, key is test_row_0/C:col10/1730989167109/Put/seqid=0 2024-11-07T14:19:27,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742258_1434 (size=12001) 2024-11-07T14:19:27,643 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/ad58e06505464133b59e9f2ada400bf9 2024-11-07T14:19:27,649 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/644c2342b4b84ddb81aaa9ffda6c4d4b as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/644c2342b4b84ddb81aaa9ffda6c4d4b 2024-11-07T14:19:27,656 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/644c2342b4b84ddb81aaa9ffda6c4d4b, entries=200, sequenceid=14, filesize=14.0 K 2024-11-07T14:19:27,657 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/67b2d1ce91914642bf7a2e7255b3731b as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/67b2d1ce91914642bf7a2e7255b3731b 2024-11-07T14:19:27,664 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/67b2d1ce91914642bf7a2e7255b3731b, entries=150, sequenceid=14, filesize=11.7 K 2024-11-07T14:19:27,666 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/ad58e06505464133b59e9f2ada400bf9 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/ad58e06505464133b59e9f2ada400bf9 2024-11-07T14:19:27,673 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/ad58e06505464133b59e9f2ada400bf9, entries=150, sequenceid=14, filesize=11.7 K 2024-11-07T14:19:27,674 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for e3403f2020527e03e67f0f2ab02983ef in 562ms, sequenceid=14, compaction requested=false 2024-11-07T14:19:27,674 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:27,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-07T14:19:27,716 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:27,717 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-07T14:19:27,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
2024-11-07T14:19:27,721 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2837): Flushing e3403f2020527e03e67f0f2ab02983ef 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-07T14:19:27,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=A 2024-11-07T14:19:27,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:27,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=B 2024-11-07T14:19:27,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:27,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=C 2024-11-07T14:19:27,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:27,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/437f4f9f15ea46f19cbada4f63e56e3e is 50, key is test_row_0/A:col10/1730989167123/Put/seqid=0 2024-11-07T14:19:27,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742259_1435 (size=12001) 2024-11-07T14:19:27,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on e3403f2020527e03e67f0f2ab02983ef 2024-11-07T14:19:27,734 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:27,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:27,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989227739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:27,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:27,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989227739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:27,746 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:27,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989227741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:27,746 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:27,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989227742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:27,746 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:27,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989227743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:27,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:27,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989227844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:27,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:27,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989227844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:27,851 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:27,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989227847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:27,851 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:27,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989227847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:27,851 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:27,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989227847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:28,020 ERROR [LeaseRenewer:jenkins.hfs.0@localhost:34807 {}] server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins.hfs.0@localhost:34807,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null at org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:28,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:28,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989228047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:28,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:28,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989228047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:28,055 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:28,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989228052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:28,057 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:28,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989228053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:28,057 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:28,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989228053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:28,130 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/437f4f9f15ea46f19cbada4f63e56e3e 2024-11-07T14:19:28,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/dbaee0ed0c1f47c0b4b1397909cfad86 is 50, key is test_row_0/B:col10/1730989167123/Put/seqid=0 2024-11-07T14:19:28,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742260_1436 (size=12001) 2024-11-07T14:19:28,143 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/dbaee0ed0c1f47c0b4b1397909cfad86 2024-11-07T14:19:28,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/112a1009afe9402ab729d1749f68fb4a is 50, key is 
test_row_0/C:col10/1730989167123/Put/seqid=0 2024-11-07T14:19:28,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742261_1437 (size=12001) 2024-11-07T14:19:28,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-07T14:19:28,353 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:28,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989228351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:28,354 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:28,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989228351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:28,360 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:28,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989228357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:28,361 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:28,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989228358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:28,364 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:28,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989228360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:28,556 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/112a1009afe9402ab729d1749f68fb4a 2024-11-07T14:19:28,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/437f4f9f15ea46f19cbada4f63e56e3e as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/437f4f9f15ea46f19cbada4f63e56e3e 2024-11-07T14:19:28,580 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/437f4f9f15ea46f19cbada4f63e56e3e, entries=150, sequenceid=38, filesize=11.7 K 2024-11-07T14:19:28,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/dbaee0ed0c1f47c0b4b1397909cfad86 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/dbaee0ed0c1f47c0b4b1397909cfad86 2024-11-07T14:19:28,587 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/dbaee0ed0c1f47c0b4b1397909cfad86, entries=150, sequenceid=38, filesize=11.7 K 2024-11-07T14:19:28,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/112a1009afe9402ab729d1749f68fb4a as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/112a1009afe9402ab729d1749f68fb4a 2024-11-07T14:19:28,593 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/112a1009afe9402ab729d1749f68fb4a, entries=150, sequenceid=38, filesize=11.7 K 2024-11-07T14:19:28,594 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for e3403f2020527e03e67f0f2ab02983ef in 873ms, sequenceid=38, compaction requested=false 2024-11-07T14:19:28,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2538): Flush status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:28,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:28,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-11-07T14:19:28,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=123 2024-11-07T14:19:28,598 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-11-07T14:19:28,598 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4900 sec 2024-11-07T14:19:28,600 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees in 1.5000 sec 2024-11-07T14:19:28,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on e3403f2020527e03e67f0f2ab02983ef 2024-11-07T14:19:28,860 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e3403f2020527e03e67f0f2ab02983ef 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-07T14:19:28,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=A 2024-11-07T14:19:28,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:28,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=B 2024-11-07T14:19:28,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:28,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
e3403f2020527e03e67f0f2ab02983ef, store=C 2024-11-07T14:19:28,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:28,868 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/00dfc91a29994836993d682d54a9c021 is 50, key is test_row_0/A:col10/1730989167742/Put/seqid=0 2024-11-07T14:19:28,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742262_1438 (size=12001) 2024-11-07T14:19:28,884 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-07T14:19:28,895 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:28,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989228889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:28,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:28,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989228892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:28,899 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:28,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989228894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:28,900 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:28,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989228895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:28,900 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:28,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989228895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:28,999 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:28,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989228996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:29,005 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:29,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989229000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:29,006 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:29,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989229001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:29,007 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:29,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989229001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:29,007 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:29,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989229001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:29,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-07T14:19:29,205 INFO [Thread-1945 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-11-07T14:19:29,206 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:29,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989229201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:29,206 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:19:29,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees 2024-11-07T14:19:29,208 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:19:29,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-07T14:19:29,209 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:19:29,209 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:19:29,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:29,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989229207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:29,214 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:29,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989229208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:29,214 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:29,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989229208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:29,214 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:29,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989229209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:29,275 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/00dfc91a29994836993d682d54a9c021 2024-11-07T14:19:29,282 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/e69d39654e6d4b948a7a7e77c2cf1a15 is 50, key is test_row_0/B:col10/1730989167742/Put/seqid=0 2024-11-07T14:19:29,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742263_1439 (size=12001) 2024-11-07T14:19:29,286 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/e69d39654e6d4b948a7a7e77c2cf1a15 2024-11-07T14:19:29,295 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/7698920be86648ecaf303d4197e801eb is 50, key is test_row_0/C:col10/1730989167742/Put/seqid=0 2024-11-07T14:19:29,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742264_1440 (size=12001) 2024-11-07T14:19:29,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-07T14:19:29,360 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:29,361 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-07T14:19:29,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
2024-11-07T14:19:29,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:29,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:29,361 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:29,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:29,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:29,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-07T14:19:29,512 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:29,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989229509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:29,513 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:29,513 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-07T14:19:29,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:29,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:29,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:29,514 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:29,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:29,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:29,519 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:29,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989229517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:29,523 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:29,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989229522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:29,524 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:29,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989229522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:29,524 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:29,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989229522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:29,666 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:29,666 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-07T14:19:29,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:29,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:29,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:29,667 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:29,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:29,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:29,701 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/7698920be86648ecaf303d4197e801eb 2024-11-07T14:19:29,706 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/00dfc91a29994836993d682d54a9c021 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/00dfc91a29994836993d682d54a9c021 2024-11-07T14:19:29,710 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/00dfc91a29994836993d682d54a9c021, entries=150, sequenceid=51, filesize=11.7 K 2024-11-07T14:19:29,711 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/e69d39654e6d4b948a7a7e77c2cf1a15 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/e69d39654e6d4b948a7a7e77c2cf1a15 2024-11-07T14:19:29,716 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/e69d39654e6d4b948a7a7e77c2cf1a15, entries=150, sequenceid=51, filesize=11.7 K 2024-11-07T14:19:29,718 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/7698920be86648ecaf303d4197e801eb as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/7698920be86648ecaf303d4197e801eb 2024-11-07T14:19:29,721 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/7698920be86648ecaf303d4197e801eb, entries=150, sequenceid=51, filesize=11.7 K 2024-11-07T14:19:29,722 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush 
of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for e3403f2020527e03e67f0f2ab02983ef in 862ms, sequenceid=51, compaction requested=true 2024-11-07T14:19:29,722 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:29,722 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3403f2020527e03e67f0f2ab02983ef:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:19:29,722 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:29,722 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3403f2020527e03e67f0f2ab02983ef:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:19:29,722 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:29,722 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:29,722 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3403f2020527e03e67f0f2ab02983ef:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:19:29,722 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:29,722 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:29,723 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:29,723 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:29,723 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): e3403f2020527e03e67f0f2ab02983ef/B is initiating minor compaction (all files) 2024-11-07T14:19:29,723 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): e3403f2020527e03e67f0f2ab02983ef/A is initiating minor compaction (all files) 2024-11-07T14:19:29,723 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e3403f2020527e03e67f0f2ab02983ef/B in TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:29,723 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e3403f2020527e03e67f0f2ab02983ef/A in TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
2024-11-07T14:19:29,724 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/67b2d1ce91914642bf7a2e7255b3731b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/dbaee0ed0c1f47c0b4b1397909cfad86, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/e69d39654e6d4b948a7a7e77c2cf1a15] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp, totalSize=35.2 K 2024-11-07T14:19:29,724 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/644c2342b4b84ddb81aaa9ffda6c4d4b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/437f4f9f15ea46f19cbada4f63e56e3e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/00dfc91a29994836993d682d54a9c021] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp, totalSize=37.4 K 2024-11-07T14:19:29,724 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 67b2d1ce91914642bf7a2e7255b3731b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1730989167109 2024-11-07T14:19:29,724 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 644c2342b4b84ddb81aaa9ffda6c4d4b, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1730989167108 2024-11-07T14:19:29,725 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting dbaee0ed0c1f47c0b4b1397909cfad86, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1730989167121 2024-11-07T14:19:29,725 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 437f4f9f15ea46f19cbada4f63e56e3e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1730989167121 2024-11-07T14:19:29,725 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting e69d39654e6d4b948a7a7e77c2cf1a15, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1730989167737 2024-11-07T14:19:29,725 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 00dfc91a29994836993d682d54a9c021, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1730989167737 2024-11-07T14:19:29,732 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3403f2020527e03e67f0f2ab02983ef#A#compaction#365 average throughput is 6.55 MB/second, slept 0 time(s) and total 
slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:19:29,733 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/8f954887c1ed4b1c9c7f7777b07a9638 is 50, key is test_row_0/A:col10/1730989167742/Put/seqid=0 2024-11-07T14:19:29,735 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3403f2020527e03e67f0f2ab02983ef#B#compaction#366 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:19:29,736 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/d7f2fe32a3734067be22d5b1c1e8ebd8 is 50, key is test_row_0/B:col10/1730989167742/Put/seqid=0 2024-11-07T14:19:29,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742265_1441 (size=12104) 2024-11-07T14:19:29,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742266_1442 (size=12104) 2024-11-07T14:19:29,743 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/8f954887c1ed4b1c9c7f7777b07a9638 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/8f954887c1ed4b1c9c7f7777b07a9638 2024-11-07T14:19:29,749 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e3403f2020527e03e67f0f2ab02983ef/A of e3403f2020527e03e67f0f2ab02983ef into 8f954887c1ed4b1c9c7f7777b07a9638(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:19:29,749 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:29,749 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef., storeName=e3403f2020527e03e67f0f2ab02983ef/A, priority=13, startTime=1730989169722; duration=0sec 2024-11-07T14:19:29,749 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:29,749 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3403f2020527e03e67f0f2ab02983ef:A 2024-11-07T14:19:29,749 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:29,751 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:29,751 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): e3403f2020527e03e67f0f2ab02983ef/C is initiating minor compaction (all files) 2024-11-07T14:19:29,751 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e3403f2020527e03e67f0f2ab02983ef/C in TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:29,751 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/ad58e06505464133b59e9f2ada400bf9, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/112a1009afe9402ab729d1749f68fb4a, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/7698920be86648ecaf303d4197e801eb] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp, totalSize=35.2 K 2024-11-07T14:19:29,752 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting ad58e06505464133b59e9f2ada400bf9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1730989167109 2024-11-07T14:19:29,752 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 112a1009afe9402ab729d1749f68fb4a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1730989167121 2024-11-07T14:19:29,752 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7698920be86648ecaf303d4197e801eb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1730989167737 2024-11-07T14:19:29,772 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): e3403f2020527e03e67f0f2ab02983ef#C#compaction#367 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:19:29,773 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/27add678d4004869b0659bbf609606ec is 50, key is test_row_0/C:col10/1730989167742/Put/seqid=0 2024-11-07T14:19:29,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742267_1443 (size=12104) 2024-11-07T14:19:29,784 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/27add678d4004869b0659bbf609606ec as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/27add678d4004869b0659bbf609606ec 2024-11-07T14:19:29,797 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e3403f2020527e03e67f0f2ab02983ef/C of e3403f2020527e03e67f0f2ab02983ef into 27add678d4004869b0659bbf609606ec(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:19:29,797 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:29,797 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef., storeName=e3403f2020527e03e67f0f2ab02983ef/C, priority=13, startTime=1730989169722; duration=0sec 2024-11-07T14:19:29,797 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:29,797 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3403f2020527e03e67f0f2ab02983ef:C 2024-11-07T14:19:29,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-07T14:19:29,819 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:29,820 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-07T14:19:29,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
2024-11-07T14:19:29,820 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2837): Flushing e3403f2020527e03e67f0f2ab02983ef 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-07T14:19:29,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=A 2024-11-07T14:19:29,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:29,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=B 2024-11-07T14:19:29,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:29,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=C 2024-11-07T14:19:29,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:29,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/3ef192bd07454bcb835c15b2cb6bf595 is 50, key is test_row_0/A:col10/1730989168892/Put/seqid=0 2024-11-07T14:19:29,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742268_1444 (size=12001) 2024-11-07T14:19:29,845 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/3ef192bd07454bcb835c15b2cb6bf595 2024-11-07T14:19:29,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/d78f473dba654e9ea05c55e23ea2a1d6 is 50, key is test_row_0/B:col10/1730989168892/Put/seqid=0 2024-11-07T14:19:29,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742269_1445 (size=12001) 2024-11-07T14:19:30,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on e3403f2020527e03e67f0f2ab02983ef 2024-11-07T14:19:30,019 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
as already flushing 2024-11-07T14:19:30,035 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:30,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989230030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:30,036 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:30,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989230031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:30,036 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:30,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989230032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:30,036 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:30,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989230033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:30,037 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:30,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989230036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:30,139 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:30,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989230137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:30,139 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:30,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989230137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:30,139 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:30,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989230137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:30,139 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:30,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989230138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:30,141 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:30,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989230139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:30,148 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/d7f2fe32a3734067be22d5b1c1e8ebd8 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/d7f2fe32a3734067be22d5b1c1e8ebd8 2024-11-07T14:19:30,152 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e3403f2020527e03e67f0f2ab02983ef/B of e3403f2020527e03e67f0f2ab02983ef into d7f2fe32a3734067be22d5b1c1e8ebd8(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:19:30,152 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:30,152 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef., storeName=e3403f2020527e03e67f0f2ab02983ef/B, priority=13, startTime=1730989169722; duration=0sec 2024-11-07T14:19:30,152 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:30,152 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3403f2020527e03e67f0f2ab02983ef:B 2024-11-07T14:19:30,259 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/d78f473dba654e9ea05c55e23ea2a1d6 2024-11-07T14:19:30,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/e64bd457c1fb4b3cafb395f83acb9a82 is 50, key is test_row_0/C:col10/1730989168892/Put/seqid=0 2024-11-07T14:19:30,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742270_1446 (size=12001) 2024-11-07T14:19:30,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-07T14:19:30,344 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:30,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989230340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:30,344 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:30,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989230340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:30,345 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:30,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989230340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:30,345 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:30,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989230341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:30,348 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:30,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989230344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:30,650 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:30,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989230647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:30,651 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:30,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989230647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:30,651 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:30,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989230647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:30,651 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:30,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989230648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:30,651 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:30,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989230649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:30,671 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/e64bd457c1fb4b3cafb395f83acb9a82 2024-11-07T14:19:30,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/3ef192bd07454bcb835c15b2cb6bf595 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/3ef192bd07454bcb835c15b2cb6bf595 2024-11-07T14:19:30,679 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/3ef192bd07454bcb835c15b2cb6bf595, entries=150, sequenceid=76, filesize=11.7 K 2024-11-07T14:19:30,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/d78f473dba654e9ea05c55e23ea2a1d6 as 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/d78f473dba654e9ea05c55e23ea2a1d6 2024-11-07T14:19:30,684 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/d78f473dba654e9ea05c55e23ea2a1d6, entries=150, sequenceid=76, filesize=11.7 K 2024-11-07T14:19:30,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/e64bd457c1fb4b3cafb395f83acb9a82 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/e64bd457c1fb4b3cafb395f83acb9a82 2024-11-07T14:19:30,690 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/e64bd457c1fb4b3cafb395f83acb9a82, entries=150, sequenceid=76, filesize=11.7 K 2024-11-07T14:19:30,690 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for e3403f2020527e03e67f0f2ab02983ef in 870ms, sequenceid=76, compaction requested=false 2024-11-07T14:19:30,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2538): Flush status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:30,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
2024-11-07T14:19:30,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=125 2024-11-07T14:19:30,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=125 2024-11-07T14:19:30,694 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-11-07T14:19:30,694 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4830 sec 2024-11-07T14:19:30,695 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees in 1.4890 sec 2024-11-07T14:19:31,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on e3403f2020527e03e67f0f2ab02983ef 2024-11-07T14:19:31,157 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e3403f2020527e03e67f0f2ab02983ef 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-07T14:19:31,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=A 2024-11-07T14:19:31,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:31,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=B 2024-11-07T14:19:31,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:31,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=C 2024-11-07T14:19:31,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:31,162 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/64e29564b8804f68a5ee0e9894da47e8 is 50, key is test_row_0/A:col10/1730989170031/Put/seqid=0 2024-11-07T14:19:31,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742271_1447 (size=14341) 2024-11-07T14:19:31,218 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:31,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989231208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:31,222 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:31,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989231216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:31,222 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:31,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989231217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:31,223 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:31,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989231217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:31,223 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:31,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989231218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:31,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-07T14:19:31,312 INFO [Thread-1945 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-11-07T14:19:31,314 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:19:31,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees 2024-11-07T14:19:31,315 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:19:31,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-07T14:19:31,316 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:19:31,316 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:19:31,323 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:31,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989231319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:31,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:31,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989231323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:31,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:31,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989231324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:31,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:31,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989231324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:31,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:31,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989231324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:31,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-07T14:19:31,467 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:31,468 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-07T14:19:31,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:31,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:31,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:31,468 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:31,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:31,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:31,527 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:31,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989231525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:31,532 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:31,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989231529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:31,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:31,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989231529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:31,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:31,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989231529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:31,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:31,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989231529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:31,566 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/64e29564b8804f68a5ee0e9894da47e8 2024-11-07T14:19:31,572 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/cd620be867c448cf80e85e94e8d86e63 is 50, key is test_row_0/B:col10/1730989170031/Put/seqid=0 2024-11-07T14:19:31,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742272_1448 (size=12001) 2024-11-07T14:19:31,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-07T14:19:31,621 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:31,621 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-07T14:19:31,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:31,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:31,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
2024-11-07T14:19:31,621 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:31,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:31,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:31,773 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:31,773 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-07T14:19:31,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:31,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:31,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:31,774 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:31,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:31,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:31,834 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:31,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989231829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:31,838 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:31,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989231834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:31,839 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:31,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989231834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:31,839 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:31,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989231834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:31,841 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:31,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989231836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:31,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-07T14:19:31,926 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:31,926 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-07T14:19:31,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:31,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:31,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:31,927 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:31,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:31,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:31,977 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/cd620be867c448cf80e85e94e8d86e63 2024-11-07T14:19:31,984 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/1f57ee2e1f3b4b1499764a0632b73757 is 50, key is test_row_0/C:col10/1730989170031/Put/seqid=0 2024-11-07T14:19:31,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742273_1449 (size=12001) 2024-11-07T14:19:32,078 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:32,079 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-07T14:19:32,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:32,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
as already flushing 2024-11-07T14:19:32,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:32,079 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:32,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:32,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:32,231 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:32,232 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-07T14:19:32,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:32,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:32,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:32,232 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:32,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:32,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:32,340 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:32,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989232337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:32,344 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:32,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989232340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:32,344 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:32,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989232341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:32,346 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:32,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989232342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:32,346 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:32,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989232343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:32,384 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:32,384 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-07T14:19:32,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:32,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:32,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:32,385 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:32,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:32,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:32,390 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/1f57ee2e1f3b4b1499764a0632b73757 2024-11-07T14:19:32,393 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/64e29564b8804f68a5ee0e9894da47e8 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/64e29564b8804f68a5ee0e9894da47e8 2024-11-07T14:19:32,396 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/64e29564b8804f68a5ee0e9894da47e8, entries=200, sequenceid=91, filesize=14.0 K 2024-11-07T14:19:32,397 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/cd620be867c448cf80e85e94e8d86e63 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/cd620be867c448cf80e85e94e8d86e63 2024-11-07T14:19:32,400 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/cd620be867c448cf80e85e94e8d86e63, entries=150, sequenceid=91, 
filesize=11.7 K 2024-11-07T14:19:32,401 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/1f57ee2e1f3b4b1499764a0632b73757 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/1f57ee2e1f3b4b1499764a0632b73757 2024-11-07T14:19:32,404 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/1f57ee2e1f3b4b1499764a0632b73757, entries=150, sequenceid=91, filesize=11.7 K 2024-11-07T14:19:32,405 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for e3403f2020527e03e67f0f2ab02983ef in 1248ms, sequenceid=91, compaction requested=true 2024-11-07T14:19:32,405 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:32,405 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:32,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3403f2020527e03e67f0f2ab02983ef:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:19:32,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:32,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3403f2020527e03e67f0f2ab02983ef:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:19:32,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:32,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3403f2020527e03e67f0f2ab02983ef:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:19:32,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:32,406 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:32,406 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38446 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:32,406 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): e3403f2020527e03e67f0f2ab02983ef/A is initiating minor compaction (all files) 2024-11-07T14:19:32,406 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e3403f2020527e03e67f0f2ab02983ef/A in 
TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:32,406 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/8f954887c1ed4b1c9c7f7777b07a9638, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/3ef192bd07454bcb835c15b2cb6bf595, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/64e29564b8804f68a5ee0e9894da47e8] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp, totalSize=37.5 K 2024-11-07T14:19:32,407 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8f954887c1ed4b1c9c7f7777b07a9638, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1730989167737 2024-11-07T14:19:32,407 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:32,407 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): e3403f2020527e03e67f0f2ab02983ef/B is initiating minor compaction (all files) 2024-11-07T14:19:32,407 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e3403f2020527e03e67f0f2ab02983ef/B in TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
2024-11-07T14:19:32,407 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/d7f2fe32a3734067be22d5b1c1e8ebd8, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/d78f473dba654e9ea05c55e23ea2a1d6, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/cd620be867c448cf80e85e94e8d86e63] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp, totalSize=35.3 K 2024-11-07T14:19:32,407 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3ef192bd07454bcb835c15b2cb6bf595, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1730989168892 2024-11-07T14:19:32,407 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting d7f2fe32a3734067be22d5b1c1e8ebd8, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1730989167737 2024-11-07T14:19:32,408 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 64e29564b8804f68a5ee0e9894da47e8, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1730989170031 2024-11-07T14:19:32,408 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting d78f473dba654e9ea05c55e23ea2a1d6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1730989168892 2024-11-07T14:19:32,408 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting cd620be867c448cf80e85e94e8d86e63, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1730989170031 2024-11-07T14:19:32,416 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3403f2020527e03e67f0f2ab02983ef#A#compaction#374 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:19:32,417 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/848a425a7d2b44439070f73e37758456 is 50, key is test_row_0/A:col10/1730989170031/Put/seqid=0 2024-11-07T14:19:32,418 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3403f2020527e03e67f0f2ab02983ef#B#compaction#375 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:19:32,418 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/191bc0c402e4481a924c682eb58d42ac is 50, key is test_row_0/B:col10/1730989170031/Put/seqid=0 2024-11-07T14:19:32,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-07T14:19:32,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742274_1450 (size=12207) 2024-11-07T14:19:32,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742275_1451 (size=12207) 2024-11-07T14:19:32,431 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/848a425a7d2b44439070f73e37758456 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/848a425a7d2b44439070f73e37758456 2024-11-07T14:19:32,433 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/191bc0c402e4481a924c682eb58d42ac as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/191bc0c402e4481a924c682eb58d42ac 2024-11-07T14:19:32,437 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e3403f2020527e03e67f0f2ab02983ef/A of e3403f2020527e03e67f0f2ab02983ef into 848a425a7d2b44439070f73e37758456(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:19:32,437 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:32,437 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef., storeName=e3403f2020527e03e67f0f2ab02983ef/A, priority=13, startTime=1730989172405; duration=0sec 2024-11-07T14:19:32,437 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:32,437 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3403f2020527e03e67f0f2ab02983ef:A 2024-11-07T14:19:32,437 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:32,438 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:32,438 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): e3403f2020527e03e67f0f2ab02983ef/C is initiating minor compaction (all files) 2024-11-07T14:19:32,438 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e3403f2020527e03e67f0f2ab02983ef/C in TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:32,438 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/27add678d4004869b0659bbf609606ec, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/e64bd457c1fb4b3cafb395f83acb9a82, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/1f57ee2e1f3b4b1499764a0632b73757] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp, totalSize=35.3 K 2024-11-07T14:19:32,438 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 27add678d4004869b0659bbf609606ec, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1730989167737 2024-11-07T14:19:32,439 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting e64bd457c1fb4b3cafb395f83acb9a82, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1730989168892 2024-11-07T14:19:32,439 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1f57ee2e1f3b4b1499764a0632b73757, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1730989170031 2024-11-07T14:19:32,440 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction 
of 3 (all) file(s) in e3403f2020527e03e67f0f2ab02983ef/B of e3403f2020527e03e67f0f2ab02983ef into 191bc0c402e4481a924c682eb58d42ac(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:19:32,440 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:32,440 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef., storeName=e3403f2020527e03e67f0f2ab02983ef/B, priority=13, startTime=1730989172406; duration=0sec 2024-11-07T14:19:32,440 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:32,440 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3403f2020527e03e67f0f2ab02983ef:B 2024-11-07T14:19:32,446 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3403f2020527e03e67f0f2ab02983ef#C#compaction#376 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:19:32,446 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/3a3be7d0a5f940729f395ff6e58ff9d0 is 50, key is test_row_0/C:col10/1730989170031/Put/seqid=0 2024-11-07T14:19:32,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742276_1452 (size=12207) 2024-11-07T14:19:32,457 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/3a3be7d0a5f940729f395ff6e58ff9d0 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/3a3be7d0a5f940729f395ff6e58ff9d0 2024-11-07T14:19:32,461 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e3403f2020527e03e67f0f2ab02983ef/C of e3403f2020527e03e67f0f2ab02983ef into 3a3be7d0a5f940729f395ff6e58ff9d0(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:19:32,461 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:32,461 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef., storeName=e3403f2020527e03e67f0f2ab02983ef/C, priority=13, startTime=1730989172406; duration=0sec 2024-11-07T14:19:32,461 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:32,461 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3403f2020527e03e67f0f2ab02983ef:C 2024-11-07T14:19:32,537 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:32,537 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-07T14:19:32,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:32,537 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2837): Flushing e3403f2020527e03e67f0f2ab02983ef 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-07T14:19:32,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=A 2024-11-07T14:19:32,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:32,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=B 2024-11-07T14:19:32,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:32,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=C 2024-11-07T14:19:32,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:32,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/d98e86ffb87b40528ddd351f5561a1a9 is 50, key is test_row_0/A:col10/1730989171217/Put/seqid=0 2024-11-07T14:19:32,550 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742277_1453 (size=12001) 2024-11-07T14:19:32,551 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/d98e86ffb87b40528ddd351f5561a1a9 2024-11-07T14:19:32,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/1c99310d49254c4aaf7ae2baa125971b is 50, key is test_row_0/B:col10/1730989171217/Put/seqid=0 2024-11-07T14:19:32,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742278_1454 (size=12001) 2024-11-07T14:19:32,963 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/1c99310d49254c4aaf7ae2baa125971b 2024-11-07T14:19:32,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/7c9f4c586c704bab9a69815bb1ab83f0 is 50, key is test_row_0/C:col10/1730989171217/Put/seqid=0 2024-11-07T14:19:32,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742279_1455 (size=12001) 2024-11-07T14:19:32,984 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/7c9f4c586c704bab9a69815bb1ab83f0 2024-11-07T14:19:32,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/d98e86ffb87b40528ddd351f5561a1a9 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/d98e86ffb87b40528ddd351f5561a1a9 2024-11-07T14:19:32,992 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/d98e86ffb87b40528ddd351f5561a1a9, entries=150, sequenceid=116, filesize=11.7 K 2024-11-07T14:19:32,992 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/1c99310d49254c4aaf7ae2baa125971b as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/1c99310d49254c4aaf7ae2baa125971b 2024-11-07T14:19:32,996 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/1c99310d49254c4aaf7ae2baa125971b, entries=150, sequenceid=116, filesize=11.7 K 2024-11-07T14:19:32,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/7c9f4c586c704bab9a69815bb1ab83f0 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/7c9f4c586c704bab9a69815bb1ab83f0 2024-11-07T14:19:33,001 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/7c9f4c586c704bab9a69815bb1ab83f0, entries=150, sequenceid=116, filesize=11.7 K 2024-11-07T14:19:33,001 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=0 B/0 for e3403f2020527e03e67f0f2ab02983ef in 464ms, sequenceid=116, compaction requested=false 2024-11-07T14:19:33,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2538): Flush status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:33,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
2024-11-07T14:19:33,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=127 2024-11-07T14:19:33,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=127 2024-11-07T14:19:33,004 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-11-07T14:19:33,004 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6870 sec 2024-11-07T14:19:33,006 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees in 1.6900 sec 2024-11-07T14:19:33,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on e3403f2020527e03e67f0f2ab02983ef 2024-11-07T14:19:33,355 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e3403f2020527e03e67f0f2ab02983ef 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-07T14:19:33,355 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=A 2024-11-07T14:19:33,355 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:33,355 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=B 2024-11-07T14:19:33,355 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:33,355 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=C 2024-11-07T14:19:33,355 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:33,359 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/eb8c30b85743472d888ef638e70efc48 is 50, key is test_row_0/A:col10/1730989173349/Put/seqid=0 2024-11-07T14:19:33,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742280_1456 (size=16681) 2024-11-07T14:19:33,386 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:33,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989233381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:33,387 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:33,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989233381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:33,387 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:33,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989233384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:33,389 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:33,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989233385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:33,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:33,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989233386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:33,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-07T14:19:33,420 INFO [Thread-1945 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-11-07T14:19:33,421 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:19:33,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees 2024-11-07T14:19:33,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-07T14:19:33,423 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:19:33,423 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:19:33,423 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:19:33,490 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:33,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989233487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:33,490 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:33,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989233488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:33,490 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:33,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989233488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:33,492 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:33,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989233490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:33,494 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:33,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989233492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:33,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-07T14:19:33,575 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:33,575 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-07T14:19:33,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:33,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:33,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:33,575 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:33,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:33,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:33,695 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:33,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989233692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:33,695 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:33,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989233692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:33,695 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:33,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989233692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:33,701 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:33,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989233692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:33,707 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:33,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989233701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:33,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-07T14:19:33,727 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:33,728 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-07T14:19:33,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:33,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:33,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:33,728 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:33,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:33,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:33,765 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/eb8c30b85743472d888ef638e70efc48 2024-11-07T14:19:33,771 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/3292ef3e41f841d3be4d78340a048711 is 50, key is test_row_0/B:col10/1730989173349/Put/seqid=0 2024-11-07T14:19:33,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742281_1457 (size=12001) 2024-11-07T14:19:33,828 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-07T14:19:33,828 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-07T14:19:33,880 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:33,880 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-07T14:19:33,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:33,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:33,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:33,881 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:33,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:33,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:34,001 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:34,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989233998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:34,005 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:34,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989234002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:34,005 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:34,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989234002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:34,006 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:34,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989234002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:34,010 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:34,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989234009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:34,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-07T14:19:34,033 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:34,033 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-07T14:19:34,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:34,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:34,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:34,034 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:34,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:34,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:34,175 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/3292ef3e41f841d3be4d78340a048711 2024-11-07T14:19:34,182 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/8eb6617a6b52432f81fcebee0e0a3f97 is 50, key is test_row_0/C:col10/1730989173349/Put/seqid=0 2024-11-07T14:19:34,186 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:34,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742282_1458 (size=12001) 2024-11-07T14:19:34,186 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-07T14:19:34,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:34,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:34,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:34,186 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:34,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:34,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:34,338 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:34,339 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-07T14:19:34,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:34,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:34,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:34,339 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:34,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:34,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:34,491 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:34,491 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-07T14:19:34,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
2024-11-07T14:19:34,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:34,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:34,492 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:34,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:34,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:34,507 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:34,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989234505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:34,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:34,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989234509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:34,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:34,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989234510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:34,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:34,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989234510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:34,514 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:34,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989234511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:34,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-07T14:19:34,586 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/8eb6617a6b52432f81fcebee0e0a3f97 2024-11-07T14:19:34,590 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/eb8c30b85743472d888ef638e70efc48 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/eb8c30b85743472d888ef638e70efc48 2024-11-07T14:19:34,593 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/eb8c30b85743472d888ef638e70efc48, entries=250, sequenceid=127, filesize=16.3 K 2024-11-07T14:19:34,594 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/3292ef3e41f841d3be4d78340a048711 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/3292ef3e41f841d3be4d78340a048711 2024-11-07T14:19:34,597 INFO 
[MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/3292ef3e41f841d3be4d78340a048711, entries=150, sequenceid=127, filesize=11.7 K 2024-11-07T14:19:34,598 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/8eb6617a6b52432f81fcebee0e0a3f97 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/8eb6617a6b52432f81fcebee0e0a3f97 2024-11-07T14:19:34,601 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/8eb6617a6b52432f81fcebee0e0a3f97, entries=150, sequenceid=127, filesize=11.7 K 2024-11-07T14:19:34,601 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for e3403f2020527e03e67f0f2ab02983ef in 1247ms, sequenceid=127, compaction requested=true 2024-11-07T14:19:34,601 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:34,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3403f2020527e03e67f0f2ab02983ef:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:19:34,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:34,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3403f2020527e03e67f0f2ab02983ef:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:19:34,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:34,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3403f2020527e03e67f0f2ab02983ef:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:19:34,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:34,602 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:34,602 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:34,603 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40889 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:34,603 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] 
regionserver.HStore(1540): e3403f2020527e03e67f0f2ab02983ef/A is initiating minor compaction (all files) 2024-11-07T14:19:34,603 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e3403f2020527e03e67f0f2ab02983ef/A in TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:34,603 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/848a425a7d2b44439070f73e37758456, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/d98e86ffb87b40528ddd351f5561a1a9, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/eb8c30b85743472d888ef638e70efc48] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp, totalSize=39.9 K 2024-11-07T14:19:34,603 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36209 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:34,603 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): e3403f2020527e03e67f0f2ab02983ef/B is initiating minor compaction (all files) 2024-11-07T14:19:34,603 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 848a425a7d2b44439070f73e37758456, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1730989170031 2024-11-07T14:19:34,603 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e3403f2020527e03e67f0f2ab02983ef/B in TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
2024-11-07T14:19:34,603 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/191bc0c402e4481a924c682eb58d42ac, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/1c99310d49254c4aaf7ae2baa125971b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/3292ef3e41f841d3be4d78340a048711] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp, totalSize=35.4 K 2024-11-07T14:19:34,603 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting d98e86ffb87b40528ddd351f5561a1a9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1730989171216 2024-11-07T14:19:34,604 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 191bc0c402e4481a924c682eb58d42ac, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1730989170031 2024-11-07T14:19:34,604 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting eb8c30b85743472d888ef638e70efc48, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1730989173349 2024-11-07T14:19:34,604 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c99310d49254c4aaf7ae2baa125971b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1730989171216 2024-11-07T14:19:34,604 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 3292ef3e41f841d3be4d78340a048711, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1730989173349 2024-11-07T14:19:34,612 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3403f2020527e03e67f0f2ab02983ef#A#compaction#383 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:19:34,612 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/6dfdda269cdc48f58cd8ec40c84971b5 is 50, key is test_row_0/A:col10/1730989173349/Put/seqid=0 2024-11-07T14:19:34,614 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3403f2020527e03e67f0f2ab02983ef#B#compaction#384 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:19:34,615 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/df30f0ed34944201bccb8611f5ed8d36 is 50, key is test_row_0/B:col10/1730989173349/Put/seqid=0 2024-11-07T14:19:34,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742284_1460 (size=12309) 2024-11-07T14:19:34,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742283_1459 (size=12309) 2024-11-07T14:19:34,628 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/df30f0ed34944201bccb8611f5ed8d36 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/df30f0ed34944201bccb8611f5ed8d36 2024-11-07T14:19:34,630 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/6dfdda269cdc48f58cd8ec40c84971b5 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/6dfdda269cdc48f58cd8ec40c84971b5 2024-11-07T14:19:34,634 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e3403f2020527e03e67f0f2ab02983ef/A of e3403f2020527e03e67f0f2ab02983ef into 6dfdda269cdc48f58cd8ec40c84971b5(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:19:34,634 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:34,634 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef., storeName=e3403f2020527e03e67f0f2ab02983ef/A, priority=13, startTime=1730989174601; duration=0sec 2024-11-07T14:19:34,634 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:34,634 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3403f2020527e03e67f0f2ab02983ef:A 2024-11-07T14:19:34,634 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:34,635 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e3403f2020527e03e67f0f2ab02983ef/B of e3403f2020527e03e67f0f2ab02983ef into df30f0ed34944201bccb8611f5ed8d36(size=12.0 K), total size for store is 12.0 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:19:34,635 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:34,635 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef., storeName=e3403f2020527e03e67f0f2ab02983ef/B, priority=13, startTime=1730989174602; duration=0sec 2024-11-07T14:19:34,635 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:34,635 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3403f2020527e03e67f0f2ab02983ef:B 2024-11-07T14:19:34,636 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36209 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:34,636 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): e3403f2020527e03e67f0f2ab02983ef/C is initiating minor compaction (all files) 2024-11-07T14:19:34,636 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e3403f2020527e03e67f0f2ab02983ef/C in TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:34,636 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/3a3be7d0a5f940729f395ff6e58ff9d0, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/7c9f4c586c704bab9a69815bb1ab83f0, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/8eb6617a6b52432f81fcebee0e0a3f97] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp, totalSize=35.4 K 2024-11-07T14:19:34,636 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3a3be7d0a5f940729f395ff6e58ff9d0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1730989170031 2024-11-07T14:19:34,637 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c9f4c586c704bab9a69815bb1ab83f0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1730989171216 2024-11-07T14:19:34,637 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8eb6617a6b52432f81fcebee0e0a3f97, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1730989173349 2024-11-07T14:19:34,644 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:34,644 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] 
regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-07T14:19:34,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:34,645 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2837): Flushing e3403f2020527e03e67f0f2ab02983ef 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-07T14:19:34,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=A 2024-11-07T14:19:34,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:34,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=B 2024-11-07T14:19:34,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:34,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=C 2024-11-07T14:19:34,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:34,646 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3403f2020527e03e67f0f2ab02983ef#C#compaction#385 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:19:34,647 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/d57ba986568348e7b05b1fcf475f2a17 is 50, key is test_row_0/C:col10/1730989173349/Put/seqid=0 2024-11-07T14:19:34,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/df4ee5c6a8d84cfabb82ac92c24915dd is 50, key is test_row_0/A:col10/1730989173378/Put/seqid=0 2024-11-07T14:19:34,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742285_1461 (size=12309) 2024-11-07T14:19:34,658 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/d57ba986568348e7b05b1fcf475f2a17 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/d57ba986568348e7b05b1fcf475f2a17 2024-11-07T14:19:34,664 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e3403f2020527e03e67f0f2ab02983ef/C of e3403f2020527e03e67f0f2ab02983ef into d57ba986568348e7b05b1fcf475f2a17(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
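Editor's note: the entries above show the short-compactions thread picking three eligible store files for a minor compaction ("Exploring compaction algorithm has selected 3 files of size 36209 ... with 1 in ratio") and rewriting them into a single ~12 K file per store. The snippet below is a deliberately simplified, hypothetical illustration of ratio-based file selection, not the actual ExploringCompactionPolicy code: a window of files qualifies when no single file is much larger than the rest combined.

```java
import java.util.ArrayList;
import java.util.List;

// Simplified, hypothetical sketch of ratio-based compaction selection.
// Not the real ExploringCompactionPolicy; it only shows the core idea that a
// window of files is eligible when no single file dwarfs the others.
public class RatioSelectionSketch {

    /** Returns the first window of at least minFiles whose sizes satisfy the ratio check. */
    static List<Long> selectFiles(List<Long> fileSizes, int minFiles, int maxFiles, double ratio) {
        for (int start = 0; start + minFiles <= fileSizes.size(); start++) {
            for (int end = Math.min(fileSizes.size(), start + maxFiles); end - start >= minFiles; end--) {
                List<Long> window = fileSizes.subList(start, end);
                if (withinRatio(window, ratio)) {
                    return new ArrayList<>(window);
                }
            }
        }
        return List.of();
    }

    /** Every file must be no larger than ratio times the sum of the other files in the window. */
    static boolean withinRatio(List<Long> window, double ratio) {
        long total = window.stream().mapToLong(Long::longValue).sum();
        for (long size : window) {
            if (size > ratio * (total - size)) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Three store files of roughly equal size, as in the log above (~12 K each).
        System.out.println(selectFiles(List.of(12151L, 11983L, 12075L), 3, 10, 1.2));
    }
}
```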
2024-11-07T14:19:34,664 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:34,664 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef., storeName=e3403f2020527e03e67f0f2ab02983ef/C, priority=13, startTime=1730989174602; duration=0sec 2024-11-07T14:19:34,664 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:34,665 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3403f2020527e03e67f0f2ab02983ef:C 2024-11-07T14:19:34,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742286_1462 (size=12151) 2024-11-07T14:19:35,067 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/df4ee5c6a8d84cfabb82ac92c24915dd 2024-11-07T14:19:35,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/2163488c242b44f1899007c8510321bd is 50, key is test_row_0/B:col10/1730989173378/Put/seqid=0 2024-11-07T14:19:35,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742287_1463 (size=12151) 2024-11-07T14:19:35,079 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/2163488c242b44f1899007c8510321bd 2024-11-07T14:19:35,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/cf45816e0aa94af9b74f20c61ee22ecc is 50, key is test_row_0/C:col10/1730989173378/Put/seqid=0 2024-11-07T14:19:35,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742288_1464 (size=12151) 2024-11-07T14:19:35,490 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/cf45816e0aa94af9b74f20c61ee22ecc 
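Editor's note: both the flusher and the compactor in these entries first write their output under the region's `.tmp` directory and only then commit it into the store directory (the "Committing hdfs://.../.tmp/... as hdfs://.../..." entries around it). A minimal sketch of that write-then-rename pattern on HDFS follows; the paths and helper name are illustrative, but the FileSystem calls are the standard Hadoop API.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Minimal sketch of the "write to .tmp, then rename into place" pattern that the
// flush and compaction entries above describe. Paths and the helper name are illustrative.
public class TmpCommitSketch {

    /** Moves a finished temporary file into the store directory via a rename. */
    static Path commitStoreFile(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
        if (!fs.exists(tmpFile)) {
            throw new IOException("temporary file missing: " + tmpFile);
        }
        fs.mkdirs(storeDir);                          // no-op if the directory already exists
        Path target = new Path(storeDir, tmpFile.getName());
        if (!fs.rename(tmpFile, target)) {            // HDFS rename within one namespace is atomic
            throw new IOException("failed to commit " + tmpFile + " as " + target);
        }
        return target;
    }

    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();     // picks up fs.defaultFS, e.g. hdfs://localhost:34807
        try (FileSystem fs = FileSystem.get(conf)) {
            Path tmp = new Path("/user/jenkins/region/.tmp/A/example-hfile");
            Path store = new Path("/user/jenkins/region/A");
            System.out.println("committed as " + commitStoreFile(fs, tmp, store));
        }
    }
}
```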
2024-11-07T14:19:35,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/df4ee5c6a8d84cfabb82ac92c24915dd as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/df4ee5c6a8d84cfabb82ac92c24915dd 2024-11-07T14:19:35,497 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/df4ee5c6a8d84cfabb82ac92c24915dd, entries=150, sequenceid=154, filesize=11.9 K 2024-11-07T14:19:35,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/2163488c242b44f1899007c8510321bd as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/2163488c242b44f1899007c8510321bd 2024-11-07T14:19:35,501 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/2163488c242b44f1899007c8510321bd, entries=150, sequenceid=154, filesize=11.9 K 2024-11-07T14:19:35,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/cf45816e0aa94af9b74f20c61ee22ecc as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/cf45816e0aa94af9b74f20c61ee22ecc 2024-11-07T14:19:35,505 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/cf45816e0aa94af9b74f20c61ee22ecc, entries=150, sequenceid=154, filesize=11.9 K 2024-11-07T14:19:35,506 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=0 B/0 for e3403f2020527e03e67f0f2ab02983ef in 860ms, sequenceid=154, compaction requested=false 2024-11-07T14:19:35,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2538): Flush status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:35,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on 
TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:35,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=129 2024-11-07T14:19:35,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=129 2024-11-07T14:19:35,508 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-11-07T14:19:35,508 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0840 sec 2024-11-07T14:19:35,509 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees in 2.0870 sec 2024-11-07T14:19:35,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-07T14:19:35,526 INFO [Thread-1945 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-11-07T14:19:35,527 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:19:35,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-11-07T14:19:35,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-07T14:19:35,528 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:19:35,529 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:19:35,529 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:19:35,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on e3403f2020527e03e67f0f2ab02983ef 2024-11-07T14:19:35,552 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e3403f2020527e03e67f0f2ab02983ef 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-07T14:19:35,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=A 2024-11-07T14:19:35,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:35,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=B 2024-11-07T14:19:35,553 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:35,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=C 2024-11-07T14:19:35,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:35,556 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/3a24cc8792ba4421b52bff041d6ebae7 is 50, key is test_row_0/A:col10/1730989175551/Put/seqid=0 2024-11-07T14:19:35,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742289_1465 (size=12151) 2024-11-07T14:19:35,580 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:35,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989235574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:35,581 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:35,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989235574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:35,581 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:35,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989235575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:35,582 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:35,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989235576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:35,583 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:35,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989235577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:35,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-07T14:19:35,680 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:35,680 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-07T14:19:35,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:35,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:35,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:35,681 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
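Editor's note: the repeated `RegionTooBusyException: Over memstore limit=512.0 K` warnings above are the region server pushing back on writers while the in-flight flush catches up. The standard HBase client already retries this exception internally with backoff, so the loop below is only a hedged illustration of what that pushback means for an application driving its own retries; the table name and row mirror the test, the timing values are arbitrary.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative only: the HBase client normally handles RegionTooBusyException retries itself.
// This sketch just makes the backoff explicit for a hand-rolled write loop.
public class BusyRegionRetrySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    break;                                  // write accepted
                } catch (RegionTooBusyException busy) {
                    // Memstore is over its blocking limit; give the flush time to complete.
                    Thread.sleep(backoffMs);
                    backoffMs = Math.min(backoffMs * 2, 5_000);
                }
            }
        }
    }
}
```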
2024-11-07T14:19:35,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:35,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:35,686 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:35,687 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:35,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989235681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:35,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989235682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:35,687 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:35,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989235682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:35,687 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:35,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989235683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:35,687 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:35,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989235683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:35,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-07T14:19:35,833 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:35,833 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-07T14:19:35,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:35,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:35,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:35,834 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
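Editor's note: everything from the stored FlushTableProcedure (pid=130) down to the failing FlushRegionCallable attempts above is the server side of a client-issued table flush. The master fans out a FlushRegionProcedure (pid=131) and keeps re-dispatching it because the region reports "NOT flushing ... as already flushing" while the memstore-pressure flush is still running; the client meanwhile polls "Checking to see if procedure is done". From the client, the whole exchange is a single admin call, sketched below (connection setup as in the previous example).

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal sketch of the client call behind the FlushTableProcedure entries above.
// The call blocks until the master reports the flush procedure as done, which is
// why the log repeatedly shows "Checking to see if procedure is done pid=130".
public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```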
2024-11-07T14:19:35,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:35,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:35,892 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:35,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989235888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:35,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:35,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989235888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:35,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:35,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989235888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:35,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:35,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989235889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:35,894 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:35,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989235889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:35,961 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/3a24cc8792ba4421b52bff041d6ebae7 2024-11-07T14:19:35,967 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/29273600e847495e8d4ef34411c2c9b6 is 50, key is test_row_0/B:col10/1730989175551/Put/seqid=0 2024-11-07T14:19:35,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742290_1466 (size=12151) 2024-11-07T14:19:35,972 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/29273600e847495e8d4ef34411c2c9b6 2024-11-07T14:19:35,978 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/3c3da67daa014004aa8ca1bdc273deb6 is 50, key is test_row_0/C:col10/1730989175551/Put/seqid=0 2024-11-07T14:19:35,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742291_1467 (size=12151) 2024-11-07T14:19:35,986 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:35,986 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-07T14:19:35,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
2024-11-07T14:19:35,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:35,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:35,986 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:35,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:35,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:36,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-07T14:19:36,138 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:36,139 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-07T14:19:36,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
2024-11-07T14:19:36,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:36,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:36,139 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:36,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:36,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:36,195 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:36,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989236195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:36,195 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:36,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989236195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:36,198 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:36,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989236195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:36,199 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:36,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989236195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:36,199 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:36,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989236196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:36,291 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:36,291 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-07T14:19:36,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:36,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:36,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:36,292 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:36,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:36,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:36,383 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/3c3da67daa014004aa8ca1bdc273deb6 2024-11-07T14:19:36,387 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/3a24cc8792ba4421b52bff041d6ebae7 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/3a24cc8792ba4421b52bff041d6ebae7 2024-11-07T14:19:36,390 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/3a24cc8792ba4421b52bff041d6ebae7, entries=150, sequenceid=168, filesize=11.9 K 2024-11-07T14:19:36,391 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/29273600e847495e8d4ef34411c2c9b6 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/29273600e847495e8d4ef34411c2c9b6 2024-11-07T14:19:36,394 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/29273600e847495e8d4ef34411c2c9b6, entries=150, 
sequenceid=168, filesize=11.9 K 2024-11-07T14:19:36,395 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/3c3da67daa014004aa8ca1bdc273deb6 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/3c3da67daa014004aa8ca1bdc273deb6 2024-11-07T14:19:36,397 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/3c3da67daa014004aa8ca1bdc273deb6, entries=150, sequenceid=168, filesize=11.9 K 2024-11-07T14:19:36,398 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for e3403f2020527e03e67f0f2ab02983ef in 846ms, sequenceid=168, compaction requested=true 2024-11-07T14:19:36,398 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:36,398 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3403f2020527e03e67f0f2ab02983ef:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:19:36,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:36,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3403f2020527e03e67f0f2ab02983ef:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:19:36,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:36,399 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:36,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3403f2020527e03e67f0f2ab02983ef:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:19:36,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:36,399 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:36,399 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36611 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:36,399 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36611 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:36,400 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 
e3403f2020527e03e67f0f2ab02983ef/B is initiating minor compaction (all files) 2024-11-07T14:19:36,400 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): e3403f2020527e03e67f0f2ab02983ef/A is initiating minor compaction (all files) 2024-11-07T14:19:36,400 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e3403f2020527e03e67f0f2ab02983ef/B in TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:36,400 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e3403f2020527e03e67f0f2ab02983ef/A in TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:36,400 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/df30f0ed34944201bccb8611f5ed8d36, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/2163488c242b44f1899007c8510321bd, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/29273600e847495e8d4ef34411c2c9b6] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp, totalSize=35.8 K 2024-11-07T14:19:36,400 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/6dfdda269cdc48f58cd8ec40c84971b5, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/df4ee5c6a8d84cfabb82ac92c24915dd, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/3a24cc8792ba4421b52bff041d6ebae7] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp, totalSize=35.8 K 2024-11-07T14:19:36,400 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting df30f0ed34944201bccb8611f5ed8d36, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1730989173349 2024-11-07T14:19:36,400 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6dfdda269cdc48f58cd8ec40c84971b5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1730989173349 2024-11-07T14:19:36,400 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 2163488c242b44f1899007c8510321bd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1730989173378 2024-11-07T14:19:36,400 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting df4ee5c6a8d84cfabb82ac92c24915dd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1730989173378 2024-11-07T14:19:36,400 DEBUG 
[RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 29273600e847495e8d4ef34411c2c9b6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1730989175551 2024-11-07T14:19:36,401 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3a24cc8792ba4421b52bff041d6ebae7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1730989175551 2024-11-07T14:19:36,407 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3403f2020527e03e67f0f2ab02983ef#B#compaction#392 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:19:36,407 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3403f2020527e03e67f0f2ab02983ef#A#compaction#393 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:19:36,407 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/2be2aad5f25443a89e592cdc7a2cb759 is 50, key is test_row_0/B:col10/1730989175551/Put/seqid=0 2024-11-07T14:19:36,408 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/f31d8b40f6184934b7071bc0790be893 is 50, key is test_row_0/A:col10/1730989175551/Put/seqid=0 2024-11-07T14:19:36,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742293_1469 (size=12561) 2024-11-07T14:19:36,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742292_1468 (size=12561) 2024-11-07T14:19:36,444 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:36,444 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-07T14:19:36,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
2024-11-07T14:19:36,444 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing e3403f2020527e03e67f0f2ab02983ef 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-07T14:19:36,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=A 2024-11-07T14:19:36,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:36,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=B 2024-11-07T14:19:36,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:36,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=C 2024-11-07T14:19:36,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:36,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/f29b2bd5f014400c855ecac472dea5f6 is 50, key is test_row_0/A:col10/1730989175574/Put/seqid=0 2024-11-07T14:19:36,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742294_1470 (size=12151) 2024-11-07T14:19:36,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-07T14:19:36,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on e3403f2020527e03e67f0f2ab02983ef 2024-11-07T14:19:36,700 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:36,714 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:36,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989236709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:36,715 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:36,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989236711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:36,717 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:36,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989236712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:36,718 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:36,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989236713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:36,720 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:36,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989236715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:36,816 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/f31d8b40f6184934b7071bc0790be893 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/f31d8b40f6184934b7071bc0790be893 2024-11-07T14:19:36,818 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/2be2aad5f25443a89e592cdc7a2cb759 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/2be2aad5f25443a89e592cdc7a2cb759 2024-11-07T14:19:36,820 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:36,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989236816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:36,820 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:36,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989236816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:36,820 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:36,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989236818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:36,821 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e3403f2020527e03e67f0f2ab02983ef/A of e3403f2020527e03e67f0f2ab02983ef into f31d8b40f6184934b7071bc0790be893(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:19:36,821 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:36,821 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef., storeName=e3403f2020527e03e67f0f2ab02983ef/A, priority=13, startTime=1730989176398; duration=0sec 2024-11-07T14:19:36,821 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:36,821 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3403f2020527e03e67f0f2ab02983ef:A 2024-11-07T14:19:36,821 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:36,822 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36611 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:36,822 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): e3403f2020527e03e67f0f2ab02983ef/C is initiating minor compaction (all files) 2024-11-07T14:19:36,822 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e3403f2020527e03e67f0f2ab02983ef/C in TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
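
The repeated RegionTooBusyException warnings above are thrown by HRegion.checkResources(): once a region's memstore crosses its blocking limit, incoming mutations are rejected until a flush drains it. The "Over memstore limit=512.0 K" figure is the product of the per-region flush size and the block multiplier. The sketch below only illustrates how such a limit is configured; the 128 KB flush size and multiplier of 4 are assumed values that happen to multiply out to 512 KB, they are not read from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed illustration: a 128 KB flush size with a block multiplier of 4
        // yields the 512 KB blocking limit reported in the warnings above.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        // checkResources() rejects writes with RegionTooBusyException while the
        // region memstore exceeds flushSize * multiplier.
        System.out.println("blocking limit = " + (flushSize * multiplier) + " bytes");
      }
    }
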
2024-11-07T14:19:36,822 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/d57ba986568348e7b05b1fcf475f2a17, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/cf45816e0aa94af9b74f20c61ee22ecc, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/3c3da67daa014004aa8ca1bdc273deb6] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp, totalSize=35.8 K 2024-11-07T14:19:36,823 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting d57ba986568348e7b05b1fcf475f2a17, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1730989173349 2024-11-07T14:19:36,823 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e3403f2020527e03e67f0f2ab02983ef/B of e3403f2020527e03e67f0f2ab02983ef into 2be2aad5f25443a89e592cdc7a2cb759(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:19:36,823 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:36,823 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef., storeName=e3403f2020527e03e67f0f2ab02983ef/B, priority=13, startTime=1730989176399; duration=0sec 2024-11-07T14:19:36,823 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting cf45816e0aa94af9b74f20c61ee22ecc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1730989173378 2024-11-07T14:19:36,823 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:36,823 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3403f2020527e03e67f0f2ab02983ef:B 2024-11-07T14:19:36,823 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c3da67daa014004aa8ca1bdc273deb6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1730989175551 2024-11-07T14:19:36,823 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:36,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989236819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:36,827 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:36,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989236822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:36,830 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3403f2020527e03e67f0f2ab02983ef#C#compaction#395 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:19:36,831 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/b9b38c0da2564b6aadb182a4ef594d88 is 50, key is test_row_0/C:col10/1730989175551/Put/seqid=0 2024-11-07T14:19:36,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742295_1471 (size=12561) 2024-11-07T14:19:36,852 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=191 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/f29b2bd5f014400c855ecac472dea5f6 2024-11-07T14:19:36,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/741dc0fe4e4f46358cbf9dfc12604c4f is 50, key is test_row_0/B:col10/1730989175574/Put/seqid=0 2024-11-07T14:19:36,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742296_1472 (size=12151) 2024-11-07T14:19:36,873 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=191 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/741dc0fe4e4f46358cbf9dfc12604c4f 2024-11-07T14:19:36,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/ccd0e2f043ab49cc85e927f37244bd82 is 50, key is test_row_0/C:col10/1730989175574/Put/seqid=0 2024-11-07T14:19:36,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742297_1473 (size=12151) 2024-11-07T14:19:37,022 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:37,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989237021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:37,022 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:37,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989237021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:37,023 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:37,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989237023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:37,029 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:37,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989237025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:37,031 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:37,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989237030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:37,247 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/b9b38c0da2564b6aadb182a4ef594d88 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/b9b38c0da2564b6aadb182a4ef594d88 2024-11-07T14:19:37,253 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e3403f2020527e03e67f0f2ab02983ef/C of e3403f2020527e03e67f0f2ab02983ef into b9b38c0da2564b6aadb182a4ef594d88(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
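
The PressureAwareThroughputController entry above reports the compaction write rate (6.55 MB/second for e3403f2020527e03e67f0f2ab02983ef#C) against a configured total limit of 50.00 MB/second. A minimal configuration sketch follows; it assumes the standard compaction throughput bound keys, and only the 50 MB/s cap comes from the log — the lower bound value is an assumption.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed keys for the pressure-aware compaction throughput controller;
        // the 50 MB/s "total limit" in the log would correspond to the higher bound.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 50L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 25L * 1024 * 1024);
        System.out.println("compaction throughput capped at "
            + conf.getLong("hbase.hstore.compaction.throughput.higher.bound", 0) + " bytes/s");
      }
    }
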
2024-11-07T14:19:37,253 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:37,253 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef., storeName=e3403f2020527e03e67f0f2ab02983ef/C, priority=13, startTime=1730989176399; duration=0sec 2024-11-07T14:19:37,253 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:37,253 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3403f2020527e03e67f0f2ab02983ef:C 2024-11-07T14:19:37,317 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=191 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/ccd0e2f043ab49cc85e927f37244bd82 2024-11-07T14:19:37,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/f29b2bd5f014400c855ecac472dea5f6 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/f29b2bd5f014400c855ecac472dea5f6 2024-11-07T14:19:37,326 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/f29b2bd5f014400c855ecac472dea5f6, entries=150, sequenceid=191, filesize=11.9 K 2024-11-07T14:19:37,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/741dc0fe4e4f46358cbf9dfc12604c4f as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/741dc0fe4e4f46358cbf9dfc12604c4f 2024-11-07T14:19:37,326 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:37,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989237324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:37,327 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:37,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989237325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:37,330 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/741dc0fe4e4f46358cbf9dfc12604c4f, entries=150, sequenceid=191, filesize=11.9 K 2024-11-07T14:19:37,330 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:37,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989237325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:37,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/ccd0e2f043ab49cc85e927f37244bd82 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/ccd0e2f043ab49cc85e927f37244bd82 2024-11-07T14:19:37,334 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/ccd0e2f043ab49cc85e927f37244bd82, entries=150, sequenceid=191, filesize=11.9 K 2024-11-07T14:19:37,335 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for e3403f2020527e03e67f0f2ab02983ef in 891ms, sequenceid=191, compaction requested=false 2024-11-07T14:19:37,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:37,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
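
The pid=130/131 procedure entries around this flush are the server side of a client-requested table flush; the matching client completion appears below as "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed". From the client, the whole chain is driven by a single Admin call, roughly as in this minimal sketch (default client configuration assumed):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // The master turns this request into a FlushTableProcedure with one
          // FlushRegionProcedure per region, as the pid=130/131 entries show.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
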
2024-11-07T14:19:37,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131 2024-11-07T14:19:37,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=131 2024-11-07T14:19:37,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on e3403f2020527e03e67f0f2ab02983ef 2024-11-07T14:19:37,337 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e3403f2020527e03e67f0f2ab02983ef 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-07T14:19:37,337 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-11-07T14:19:37,337 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8070 sec 2024-11-07T14:19:37,338 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=A 2024-11-07T14:19:37,338 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:37,338 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=B 2024-11-07T14:19:37,338 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:37,338 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=C 2024-11-07T14:19:37,338 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:37,339 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 1.8110 sec 2024-11-07T14:19:37,346 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/cb037c53216046ada7114206ae1d5bfd is 50, key is test_row_0/A:col10/1730989177337/Put/seqid=0 2024-11-07T14:19:37,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742298_1474 (size=16931) 2024-11-07T14:19:37,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:37,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989237378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:37,383 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:37,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989237379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:37,482 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:37,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989237481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:37,487 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:37,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989237484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:37,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-07T14:19:37,632 INFO [Thread-1945 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-11-07T14:19:37,633 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:19:37,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-11-07T14:19:37,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-07T14:19:37,635 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:19:37,635 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:19:37,635 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:19:37,689 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:37,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989237685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:37,692 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:37,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989237688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:37,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-07T14:19:37,752 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/cb037c53216046ada7114206ae1d5bfd 2024-11-07T14:19:37,758 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/76a54747240046d3bc52ee77a08d273c is 50, key is test_row_0/B:col10/1730989177337/Put/seqid=0 2024-11-07T14:19:37,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742299_1475 (size=12151) 2024-11-07T14:19:37,786 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:37,787 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-07T14:19:37,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:37,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:37,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
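
While the region stays over its blocking limit, each Mutate RPC above is answered with RegionTooBusyException and logged by CallRunner with a deadline for the caller. The stock HBase client already retries this internally (governed by hbase.client.retries.number and hbase.client.pause); an application that drives writes itself can back off explicitly, roughly as in the sketch below. The retry budget and backoff values are illustrative only; the row, family, and qualifier follow the test_row_0/A:col10 pattern seen in the flush output above.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
          long backoffMs = 100;
          for (int attempt = 1; ; attempt++) {
            try {
              // May surface RegionTooBusyException while the memstore is over its limit.
              table.put(put);
              break;
            } catch (IOException e) {
              if (attempt >= 5) throw e;   // hypothetical retry budget
              Thread.sleep(backoffMs);
              backoffMs *= 2;              // back off while the flush drains the memstore
            }
          }
        }
      }
    }
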
2024-11-07T14:19:37,787 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:37,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:37,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:37,831 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:37,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989237828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:37,834 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:37,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989237831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:37,834 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:37,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989237832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:37,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-07T14:19:37,939 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:37,940 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-07T14:19:37,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:37,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:37,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:37,940 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:37,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:37,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:37,994 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:37,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989237991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:38,000 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:38,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989237995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:38,092 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:38,093 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-07T14:19:38,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:38,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:38,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:38,093 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:38,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:38,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:38,162 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/76a54747240046d3bc52ee77a08d273c 2024-11-07T14:19:38,172 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/96264274b1b748f38aab09c126ac4d0b is 50, key is test_row_0/C:col10/1730989177337/Put/seqid=0 2024-11-07T14:19:38,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742300_1476 (size=12151) 2024-11-07T14:19:38,185 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/96264274b1b748f38aab09c126ac4d0b 2024-11-07T14:19:38,189 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/cb037c53216046ada7114206ae1d5bfd as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/cb037c53216046ada7114206ae1d5bfd 2024-11-07T14:19:38,192 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/cb037c53216046ada7114206ae1d5bfd, entries=250, sequenceid=209, filesize=16.5 K 2024-11-07T14:19:38,193 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/76a54747240046d3bc52ee77a08d273c as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/76a54747240046d3bc52ee77a08d273c 2024-11-07T14:19:38,196 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/76a54747240046d3bc52ee77a08d273c, entries=150, sequenceid=209, filesize=11.9 K 2024-11-07T14:19:38,196 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/96264274b1b748f38aab09c126ac4d0b as 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/96264274b1b748f38aab09c126ac4d0b 2024-11-07T14:19:38,201 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/96264274b1b748f38aab09c126ac4d0b, entries=150, sequenceid=209, filesize=11.9 K 2024-11-07T14:19:38,202 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for e3403f2020527e03e67f0f2ab02983ef in 864ms, sequenceid=209, compaction requested=true 2024-11-07T14:19:38,202 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:38,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3403f2020527e03e67f0f2ab02983ef:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:19:38,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:38,202 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:38,202 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:38,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3403f2020527e03e67f0f2ab02983ef:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:19:38,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:38,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3403f2020527e03e67f0f2ab02983ef:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:19:38,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:38,203 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:38,203 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): e3403f2020527e03e67f0f2ab02983ef/B is initiating minor compaction (all files) 2024-11-07T14:19:38,204 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e3403f2020527e03e67f0f2ab02983ef/B in TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
2024-11-07T14:19:38,204 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/2be2aad5f25443a89e592cdc7a2cb759, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/741dc0fe4e4f46358cbf9dfc12604c4f, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/76a54747240046d3bc52ee77a08d273c] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp, totalSize=36.0 K 2024-11-07T14:19:38,204 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 2be2aad5f25443a89e592cdc7a2cb759, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1730989175551 2024-11-07T14:19:38,205 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 741dc0fe4e4f46358cbf9dfc12604c4f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1730989175574 2024-11-07T14:19:38,205 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41643 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:38,205 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): e3403f2020527e03e67f0f2ab02983ef/A is initiating minor compaction (all files) 2024-11-07T14:19:38,205 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e3403f2020527e03e67f0f2ab02983ef/A in TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
2024-11-07T14:19:38,205 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 76a54747240046d3bc52ee77a08d273c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1730989176705 2024-11-07T14:19:38,205 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/f31d8b40f6184934b7071bc0790be893, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/f29b2bd5f014400c855ecac472dea5f6, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/cb037c53216046ada7114206ae1d5bfd] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp, totalSize=40.7 K 2024-11-07T14:19:38,205 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting f31d8b40f6184934b7071bc0790be893, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1730989175551 2024-11-07T14:19:38,206 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting f29b2bd5f014400c855ecac472dea5f6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1730989175574 2024-11-07T14:19:38,206 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb037c53216046ada7114206ae1d5bfd, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1730989176705 2024-11-07T14:19:38,217 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3403f2020527e03e67f0f2ab02983ef#B#compaction#401 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:19:38,218 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3403f2020527e03e67f0f2ab02983ef#A#compaction#402 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:19:38,218 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/ee031eafa2a24627bcf2f2e08c92dc3c is 50, key is test_row_0/B:col10/1730989177337/Put/seqid=0 2024-11-07T14:19:38,218 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/3bf2185cb5454984ac3383ef1c9a904d is 50, key is test_row_0/A:col10/1730989177337/Put/seqid=0 2024-11-07T14:19:38,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742302_1478 (size=12663) 2024-11-07T14:19:38,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742301_1477 (size=12663) 2024-11-07T14:19:38,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-07T14:19:38,245 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:38,246 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-07T14:19:38,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
2024-11-07T14:19:38,246 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing e3403f2020527e03e67f0f2ab02983ef 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-07T14:19:38,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=A 2024-11-07T14:19:38,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:38,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=B 2024-11-07T14:19:38,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:38,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=C 2024-11-07T14:19:38,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:38,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/3114c0f3f6f6409d93c7ee2a8950180d is 50, key is test_row_0/A:col10/1730989177377/Put/seqid=0 2024-11-07T14:19:38,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742303_1479 (size=12151) 2024-11-07T14:19:38,255 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/3114c0f3f6f6409d93c7ee2a8950180d 2024-11-07T14:19:38,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/5bad309892264f74847d9fe097e68205 is 50, key is test_row_0/B:col10/1730989177377/Put/seqid=0 2024-11-07T14:19:38,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742304_1480 (size=12151) 2024-11-07T14:19:38,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on e3403f2020527e03e67f0f2ab02983ef 2024-11-07T14:19:38,499 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
as already flushing 2024-11-07T14:19:38,531 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:38,531 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:38,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989238523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:38,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989238526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:38,634 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/ee031eafa2a24627bcf2f2e08c92dc3c as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/ee031eafa2a24627bcf2f2e08c92dc3c 2024-11-07T14:19:38,635 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/3bf2185cb5454984ac3383ef1c9a904d as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/3bf2185cb5454984ac3383ef1c9a904d 2024-11-07T14:19:38,637 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:38,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989238632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:38,637 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:38,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989238632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:38,639 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e3403f2020527e03e67f0f2ab02983ef/B of e3403f2020527e03e67f0f2ab02983ef into ee031eafa2a24627bcf2f2e08c92dc3c(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
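Note on the compaction selection above: the "Exploring compaction algorithm has selected 3 files ..." lines show all three eligible store files being chosen for a minor compaction of each store. As a rough, simplified illustration of ratio-based selection only (this is not the actual org.apache.hadoop.hbase ExploringCompactionPolicy code, and the helper names are made up), the idea can be sketched as:

import java.util.ArrayList;
import java.util.List;

// Simplified sketch of ratio-based minor-compaction selection, hinted at by the
// "selected 3 files of size 36863 ..." lines above. NOT the real HBase policy.
public class CompactionSelectionSketch {
    // Include a file only if it is no larger than `ratio` times the sum of the other
    // candidates, so a single oversized file is not rewritten needlessly.
    static List<Long> selectForMinorCompaction(List<Long> storeFileSizes, double ratio) {
        long total = storeFileSizes.stream().mapToLong(Long::longValue).sum();
        List<Long> selected = new ArrayList<>();
        for (long size : storeFileSizes) {
            long others = total - size;
            if (size <= others * ratio) {
                selected.add(size);
            }
        }
        return selected;
    }

    public static void main(String[] args) {
        // The B store above has three files of roughly 12.3 K, 11.9 K and 11.9 K
        // (36863 bytes total); with a typical ratio all three qualify.
        List<Long> sizes = List.of(12_625L, 12_119L, 12_119L);
        System.out.println(selectForMinorCompaction(sizes, 1.2)); // prints all three sizes
    }
}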
2024-11-07T14:19:38,639 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:38,639 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef., storeName=e3403f2020527e03e67f0f2ab02983ef/B, priority=13, startTime=1730989178202; duration=0sec 2024-11-07T14:19:38,639 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e3403f2020527e03e67f0f2ab02983ef/A of e3403f2020527e03e67f0f2ab02983ef into 3bf2185cb5454984ac3383ef1c9a904d(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:19:38,639 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:38,639 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef., storeName=e3403f2020527e03e67f0f2ab02983ef/A, priority=13, startTime=1730989178202; duration=0sec 2024-11-07T14:19:38,639 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:38,639 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:38,639 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3403f2020527e03e67f0f2ab02983ef:A 2024-11-07T14:19:38,639 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3403f2020527e03e67f0f2ab02983ef:B 2024-11-07T14:19:38,639 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:38,640 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:38,640 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): e3403f2020527e03e67f0f2ab02983ef/C is initiating minor compaction (all files) 2024-11-07T14:19:38,640 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e3403f2020527e03e67f0f2ab02983ef/C in TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
2024-11-07T14:19:38,640 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/b9b38c0da2564b6aadb182a4ef594d88, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/ccd0e2f043ab49cc85e927f37244bd82, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/96264274b1b748f38aab09c126ac4d0b] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp, totalSize=36.0 K 2024-11-07T14:19:38,640 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting b9b38c0da2564b6aadb182a4ef594d88, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1730989175551 2024-11-07T14:19:38,640 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting ccd0e2f043ab49cc85e927f37244bd82, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1730989175574 2024-11-07T14:19:38,641 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 96264274b1b748f38aab09c126ac4d0b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1730989176705 2024-11-07T14:19:38,647 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3403f2020527e03e67f0f2ab02983ef#C#compaction#405 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:19:38,647 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/2fd16b67d9c4483998ebe570f93b1e97 is 50, key is test_row_0/C:col10/1730989177337/Put/seqid=0 2024-11-07T14:19:38,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742305_1481 (size=12663) 2024-11-07T14:19:38,665 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/5bad309892264f74847d9fe097e68205 2024-11-07T14:19:38,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/707368059562464f96a27473cfd6eb47 is 50, key is test_row_0/C:col10/1730989177377/Put/seqid=0 2024-11-07T14:19:38,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742306_1482 (size=12151) 2024-11-07T14:19:38,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-07T14:19:38,837 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:38,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989238836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:38,840 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:38,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989238838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:38,841 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:38,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989238838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:38,842 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:38,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989238839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:38,842 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:38,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989238841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:39,056 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/2fd16b67d9c4483998ebe570f93b1e97 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/2fd16b67d9c4483998ebe570f93b1e97 2024-11-07T14:19:39,060 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e3403f2020527e03e67f0f2ab02983ef/C of e3403f2020527e03e67f0f2ab02983ef into 2fd16b67d9c4483998ebe570f93b1e97(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
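The repeated "Over memstore limit=512.0 K" rejections above come from the region's memstore blocking threshold. Below is a minimal, hypothetical configuration sketch (the values are illustrative and not taken from this test harness) showing how that threshold is commonly derived as the flush size times hbase.hregion.memstore.block.multiplier:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative values only: 128 KB flush size * multiplier 4 = 512 KB,
    // which matches the limit reported in the log entries above.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setLong("hbase.hregion.memstore.block.multiplier", 4L);
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
    System.out.println("blocking memstore limit ~= " + (flushSize * multiplier) + " bytes");
  }
}

While the region's memstore is over this limit, incoming mutations are rejected with RegionTooBusyException until flushes bring it back under the threshold, which is exactly the pattern visible in the surrounding entries.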
2024-11-07T14:19:39,060 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:39,060 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef., storeName=e3403f2020527e03e67f0f2ab02983ef/C, priority=13, startTime=1730989178202; duration=0sec 2024-11-07T14:19:39,060 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:39,060 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3403f2020527e03e67f0f2ab02983ef:C 2024-11-07T14:19:39,074 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/707368059562464f96a27473cfd6eb47 2024-11-07T14:19:39,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/3114c0f3f6f6409d93c7ee2a8950180d as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/3114c0f3f6f6409d93c7ee2a8950180d 2024-11-07T14:19:39,081 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/3114c0f3f6f6409d93c7ee2a8950180d, entries=150, sequenceid=230, filesize=11.9 K 2024-11-07T14:19:39,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/5bad309892264f74847d9fe097e68205 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/5bad309892264f74847d9fe097e68205 2024-11-07T14:19:39,085 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/5bad309892264f74847d9fe097e68205, entries=150, sequenceid=230, filesize=11.9 K 2024-11-07T14:19:39,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/707368059562464f96a27473cfd6eb47 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/707368059562464f96a27473cfd6eb47 2024-11-07T14:19:39,090 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/707368059562464f96a27473cfd6eb47, entries=150, sequenceid=230, filesize=11.9 K 2024-11-07T14:19:39,091 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for e3403f2020527e03e67f0f2ab02983ef in 845ms, sequenceid=230, compaction requested=false 2024-11-07T14:19:39,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:39,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:39,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-11-07T14:19:39,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-11-07T14:19:39,094 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-11-07T14:19:39,094 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4570 sec 2024-11-07T14:19:39,095 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 1.4610 sec 2024-11-07T14:19:39,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on e3403f2020527e03e67f0f2ab02983ef 2024-11-07T14:19:39,142 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e3403f2020527e03e67f0f2ab02983ef 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-07T14:19:39,143 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=A 2024-11-07T14:19:39,143 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:39,143 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=B 2024-11-07T14:19:39,143 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:39,143 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
e3403f2020527e03e67f0f2ab02983ef, store=C 2024-11-07T14:19:39,143 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:39,146 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/bf610861233c42f8a2890aa3849ee10a is 50, key is test_row_0/A:col10/1730989179141/Put/seqid=0 2024-11-07T14:19:39,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742307_1483 (size=14541) 2024-11-07T14:19:39,196 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:39,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989239191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:39,201 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:39,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989239196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:39,300 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:39,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989239297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:39,304 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:39,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989239302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:39,505 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:39,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989239502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:39,510 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:39,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989239507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:39,551 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/bf610861233c42f8a2890aa3849ee10a 2024-11-07T14:19:39,558 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/4f5c856585104e328f92b369bcb441db is 50, key is test_row_0/B:col10/1730989179141/Put/seqid=0 2024-11-07T14:19:39,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742308_1484 (size=12151) 2024-11-07T14:19:39,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-07T14:19:39,739 INFO [Thread-1945 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-11-07T14:19:39,740 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:19:39,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-11-07T14:19:39,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 
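The "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" request above (procId 132 just completed, 134 just stored) is an administrative table flush. A minimal client-side sketch, assuming a standard HBase 2.x client with default connection settings (not code from this test), of how such a flush is issued:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to run a FlushTableProcedure for the table and waits
      // for it, as reflected by the "Operation: FLUSH ... procId: 132 completed"
      // entry in the log above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}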
2024-11-07T14:19:39,742 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:19:39,743 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:19:39,743 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:19:39,810 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:39,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989239808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:39,816 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:39,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989239813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:39,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-07T14:19:39,894 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:39,894 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-07T14:19:39,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:39,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:39,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:39,895 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:39,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:39,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:39,962 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/4f5c856585104e328f92b369bcb441db 2024-11-07T14:19:39,969 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/8fe96c17435045d1aa594859d0a273b8 is 50, key is test_row_0/C:col10/1730989179141/Put/seqid=0 2024-11-07T14:19:39,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742309_1485 (size=12151) 2024-11-07T14:19:40,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-07T14:19:40,047 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:40,047 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-07T14:19:40,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:40,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:40,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:40,048 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:40,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:40,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:40,200 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:40,200 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-07T14:19:40,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:40,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:40,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:40,200 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:40,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:40,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:40,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:40,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989240315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:40,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:40,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989240317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:40,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-07T14:19:40,352 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:40,353 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-07T14:19:40,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:40,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:40,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:40,353 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
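Throughout this stretch, writer RPCs keep being turned away with RegionTooBusyException while the region flushes. The HBase client already retries such calls internally; the hypothetical outer backoff loop below (names and retry budget are illustrative and not part of this test) is only a coarse extra safety net for a standalone writer:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class BackoffPut {
  // Retries a single put with exponential backoff when the region keeps
  // rejecting writes, for example while it is over its memstore limit.
  static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
    long sleepMs = 100;
    IOException last = null;
    for (int attempt = 0; attempt < 5; attempt++) {
      try {
        table.put(put);
        return;
      } catch (IOException e) {
        last = e;
        Thread.sleep(sleepMs);
        sleepMs = Math.min(sleepMs * 2, 5_000);
      }
    }
    throw last;
  }
}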
2024-11-07T14:19:40,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:40,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:40,373 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/8fe96c17435045d1aa594859d0a273b8 2024-11-07T14:19:40,377 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/bf610861233c42f8a2890aa3849ee10a as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/bf610861233c42f8a2890aa3849ee10a 2024-11-07T14:19:40,380 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/bf610861233c42f8a2890aa3849ee10a, entries=200, sequenceid=249, filesize=14.2 K 2024-11-07T14:19:40,381 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/4f5c856585104e328f92b369bcb441db as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/4f5c856585104e328f92b369bcb441db 2024-11-07T14:19:40,384 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/4f5c856585104e328f92b369bcb441db, entries=150, sequenceid=249, filesize=11.9 K 2024-11-07T14:19:40,385 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/8fe96c17435045d1aa594859d0a273b8 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/8fe96c17435045d1aa594859d0a273b8 2024-11-07T14:19:40,388 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/8fe96c17435045d1aa594859d0a273b8, entries=150, sequenceid=249, filesize=11.9 K 2024-11-07T14:19:40,388 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for e3403f2020527e03e67f0f2ab02983ef in 1246ms, sequenceid=249, compaction requested=true 2024-11-07T14:19:40,388 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:40,389 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
e3403f2020527e03e67f0f2ab02983ef:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:19:40,389 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:40,389 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:40,389 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3403f2020527e03e67f0f2ab02983ef:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:19:40,389 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:40,389 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3403f2020527e03e67f0f2ab02983ef:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:19:40,389 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:40,389 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:40,389 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:40,389 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39355 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:40,389 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): e3403f2020527e03e67f0f2ab02983ef/B is initiating minor compaction (all files) 2024-11-07T14:19:40,389 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): e3403f2020527e03e67f0f2ab02983ef/A is initiating minor compaction (all files) 2024-11-07T14:19:40,389 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e3403f2020527e03e67f0f2ab02983ef/B in TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:40,389 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e3403f2020527e03e67f0f2ab02983ef/A in TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
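The "Exploring compaction algorithm has selected 3 files ..." entries describe the selection step: the policy walks contiguous windows of eligible store files and only keeps windows whose files are mutually within a size ratio, then picks the best surviving window. The sketch below reimplements just that ratio test for illustration; it is not the HBase source, and the 1.2 ratio and file sizes are assumptions chosen to mirror the three B-family files in this log.

// Sketch of the "files in ratio" test referenced by the ExploringCompactionPolicy
// lines above: a window of store files qualifies when no single file is more than
// `ratio` times the combined size of the other files in the window.
// Illustrative reimplementation, not HBase's code.
import java.util.List;

public class RatioCheck {
    /** Returns true if every file size is <= ratio * (sum of the other sizes). */
    static boolean filesInRatio(List<Long> sizes, double ratio) {
        if (sizes.size() < 2) {
            return true;
        }
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Roughly the three B-family files from the log: 12.4 K + 11.9 K + 11.9 K.
        List<Long> window = List.of(12_700L, 12_200L, 12_200L);
        System.out.println(filesInRatio(window, 1.2)); // true -> eligible for minor compaction
    }
}

With three similarly sized ~12 K files every window passes the test, so the policy settles on all three after considering a single permutation, which matches the "1 permutations with 1 in ratio" wording in the log.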
2024-11-07T14:19:40,390 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/ee031eafa2a24627bcf2f2e08c92dc3c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/5bad309892264f74847d9fe097e68205, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/4f5c856585104e328f92b369bcb441db] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp, totalSize=36.1 K 2024-11-07T14:19:40,390 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/3bf2185cb5454984ac3383ef1c9a904d, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/3114c0f3f6f6409d93c7ee2a8950180d, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/bf610861233c42f8a2890aa3849ee10a] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp, totalSize=38.4 K 2024-11-07T14:19:40,390 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting ee031eafa2a24627bcf2f2e08c92dc3c, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1730989176705 2024-11-07T14:19:40,390 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3bf2185cb5454984ac3383ef1c9a904d, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1730989176705 2024-11-07T14:19:40,390 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 5bad309892264f74847d9fe097e68205, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1730989177370 2024-11-07T14:19:40,390 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3114c0f3f6f6409d93c7ee2a8950180d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1730989177370 2024-11-07T14:19:40,390 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 4f5c856585104e328f92b369bcb441db, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1730989178521 2024-11-07T14:19:40,391 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting bf610861233c42f8a2890aa3849ee10a, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1730989178516 2024-11-07T14:19:40,403 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3403f2020527e03e67f0f2ab02983ef#B#compaction#410 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:19:40,404 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/a2a759049e0648eda5471cf2e683fce4 is 50, key is test_row_0/B:col10/1730989179141/Put/seqid=0 2024-11-07T14:19:40,409 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3403f2020527e03e67f0f2ab02983ef#A#compaction#411 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:19:40,410 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/d8649cd3a3d842a58bd4104dc9a770eb is 50, key is test_row_0/A:col10/1730989179141/Put/seqid=0 2024-11-07T14:19:40,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742310_1486 (size=12765) 2024-11-07T14:19:40,416 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/a2a759049e0648eda5471cf2e683fce4 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/a2a759049e0648eda5471cf2e683fce4 2024-11-07T14:19:40,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742311_1487 (size=12765) 2024-11-07T14:19:40,421 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e3403f2020527e03e67f0f2ab02983ef/B of e3403f2020527e03e67f0f2ab02983ef into a2a759049e0648eda5471cf2e683fce4(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
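The PressureAwareThroughputController entries report how fast each compaction wrote and how long it was put to sleep to stay under the configured ceiling (50.00 MB/second here; nothing was throttled in this run, hence "slept 0 time(s)"). The underlying idea is plain rate-based back-pressure on the writer. A simplified, self-contained sketch follows; the class and method names are illustrative and do not match HBase's API.

// Sketch of the throttling idea behind the PressureAwareThroughputController lines:
// the writer reports bytes written through a control point, and the controller
// sleeps it whenever the running rate would exceed the configured limit.
public class ThroughputLimiter {
    private final double maxBytesPerSec;
    private final long start = System.nanoTime();
    private long bytesWritten;

    public ThroughputLimiter(double maxBytesPerSec) {
        this.maxBytesPerSec = maxBytesPerSec;
    }

    /** Call after writing a chunk; sleeps just long enough to stay under the limit. */
    public synchronized void control(long bytes) throws InterruptedException {
        bytesWritten += bytes;
        double elapsedSec = (System.nanoTime() - start) / 1e9;
        double minSecondsNeeded = bytesWritten / maxBytesPerSec;
        long sleepMs = (long) ((minSecondsNeeded - elapsedSec) * 1000);
        if (sleepMs > 0) {
            Thread.sleep(sleepMs); // "slept N time(s)" in the log counts these pauses
        }
    }

    public static void main(String[] args) throws InterruptedException {
        ThroughputLimiter limiter = new ThroughputLimiter(50 * 1024 * 1024); // 50 MB/s, as in the log
        for (int i = 0; i < 10; i++) {
            limiter.control(8 * 1024 * 1024); // pretend we wrote an 8 MB block
        }
        System.out.println("done without exceeding the configured rate");
    }
}

HBase additionally scales the limit up and down with flush/compaction pressure, which is what the "pressure aware" in the name refers to; the sketch keeps a fixed limit for brevity.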
2024-11-07T14:19:40,421 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:40,421 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef., storeName=e3403f2020527e03e67f0f2ab02983ef/B, priority=13, startTime=1730989180389; duration=0sec 2024-11-07T14:19:40,421 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:40,421 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3403f2020527e03e67f0f2ab02983ef:B 2024-11-07T14:19:40,421 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:40,423 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/d8649cd3a3d842a58bd4104dc9a770eb as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/d8649cd3a3d842a58bd4104dc9a770eb 2024-11-07T14:19:40,423 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:40,423 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): e3403f2020527e03e67f0f2ab02983ef/C is initiating minor compaction (all files) 2024-11-07T14:19:40,423 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e3403f2020527e03e67f0f2ab02983ef/C in TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
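The "Committing ... .tmp/... as ..." entries show the two-step file handling used by both flushes and compactions: the new HFile is first written under the region's .tmp directory and only renamed into the column-family directory once it is complete, so concurrent readers never observe a partially written file. Below is a minimal sketch of that commit step using the Hadoop FileSystem API; the paths and the helper name are made up for the example.

// Sketch of the commit pattern behind the "Committing .tmp/... as ..." lines:
// write under .tmp, then rename into the family directory in one step.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitStoreFile {
    /** Moves a finished store file from the region's .tmp dir into the family dir. */
    static Path commit(FileSystem fs, Path tmpFile, Path familyDir) throws IOException {
        Path dst = new Path(familyDir, tmpFile.getName());
        if (!fs.rename(tmpFile, dst)) {          // on HDFS this is a metadata-only move
            throw new IOException("Failed to commit " + tmpFile + " to " + dst);
        }
        return dst;
    }

    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf); // local FS for the demo; the log uses HDFS
        Path tmp = new Path("/tmp/region/.tmp/B/a2a759049e0648eda5471cf2e683fce4");
        Path familyDir = new Path("/tmp/region/B");
        fs.mkdirs(familyDir);
        fs.createNewFile(tmp);                     // stand-in for a freshly written HFile
        System.out.println("committed to " + commit(fs, tmp, familyDir));
    }
}

Once the rename succeeds the store swaps the new file into its file list and the replaced inputs become eligible for archival, which is what the surrounding "Completed compaction ... into ..." lines record.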
2024-11-07T14:19:40,423 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/2fd16b67d9c4483998ebe570f93b1e97, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/707368059562464f96a27473cfd6eb47, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/8fe96c17435045d1aa594859d0a273b8] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp, totalSize=36.1 K 2024-11-07T14:19:40,424 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 2fd16b67d9c4483998ebe570f93b1e97, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1730989176705 2024-11-07T14:19:40,424 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 707368059562464f96a27473cfd6eb47, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1730989177370 2024-11-07T14:19:40,424 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 8fe96c17435045d1aa594859d0a273b8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1730989178521 2024-11-07T14:19:40,427 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e3403f2020527e03e67f0f2ab02983ef/A of e3403f2020527e03e67f0f2ab02983ef into d8649cd3a3d842a58bd4104dc9a770eb(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:19:40,427 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:40,427 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef., storeName=e3403f2020527e03e67f0f2ab02983ef/A, priority=13, startTime=1730989180388; duration=0sec 2024-11-07T14:19:40,427 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:40,427 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3403f2020527e03e67f0f2ab02983ef:A 2024-11-07T14:19:40,431 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3403f2020527e03e67f0f2ab02983ef#C#compaction#412 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:19:40,432 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/afbc4050872547d0b240623a7b32180f is 50, key is test_row_0/C:col10/1730989179141/Put/seqid=0 2024-11-07T14:19:40,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742312_1488 (size=12765) 2024-11-07T14:19:40,505 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:40,505 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-07T14:19:40,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:40,506 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing e3403f2020527e03e67f0f2ab02983ef 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-07T14:19:40,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=A 2024-11-07T14:19:40,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:40,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=B 2024-11-07T14:19:40,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:40,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=C 2024-11-07T14:19:40,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:40,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/d5ceb58cf9004c6e856d5a4f1b1081f2 is 50, key is test_row_0/A:col10/1730989179190/Put/seqid=0 2024-11-07T14:19:40,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742313_1489 (size=12301) 2024-11-07T14:19:40,842 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/afbc4050872547d0b240623a7b32180f as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/afbc4050872547d0b240623a7b32180f 2024-11-07T14:19:40,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-07T14:19:40,846 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e3403f2020527e03e67f0f2ab02983ef/C of e3403f2020527e03e67f0f2ab02983ef into afbc4050872547d0b240623a7b32180f(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:19:40,846 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:40,846 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef., storeName=e3403f2020527e03e67f0f2ab02983ef/C, priority=13, startTime=1730989180389; duration=0sec 2024-11-07T14:19:40,846 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:40,846 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3403f2020527e03e67f0f2ab02983ef:C 2024-11-07T14:19:40,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on e3403f2020527e03e67f0f2ab02983ef 2024-11-07T14:19:40,858 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:40,877 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:40,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989240874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:40,879 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:40,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989240876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:40,879 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:40,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989240876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:40,916 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/d5ceb58cf9004c6e856d5a4f1b1081f2 2024-11-07T14:19:40,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/14d148abfc37427fb03774cec087a319 is 50, key is test_row_0/B:col10/1730989179190/Put/seqid=0 2024-11-07T14:19:40,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742314_1490 (size=12301) 2024-11-07T14:19:40,928 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/14d148abfc37427fb03774cec087a319 2024-11-07T14:19:40,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/467c0e1369f6483f9ec46a722d22f07c is 50, key is test_row_0/C:col10/1730989179190/Put/seqid=0 2024-11-07T14:19:40,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742315_1491 (size=12301) 2024-11-07T14:19:40,937 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/467c0e1369f6483f9ec46a722d22f07c 2024-11-07T14:19:40,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/d5ceb58cf9004c6e856d5a4f1b1081f2 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/d5ceb58cf9004c6e856d5a4f1b1081f2 2024-11-07T14:19:40,945 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/d5ceb58cf9004c6e856d5a4f1b1081f2, entries=150, sequenceid=271, filesize=12.0 K 2024-11-07T14:19:40,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/14d148abfc37427fb03774cec087a319 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/14d148abfc37427fb03774cec087a319 2024-11-07T14:19:40,949 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/14d148abfc37427fb03774cec087a319, entries=150, sequenceid=271, filesize=12.0 K 2024-11-07T14:19:40,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/467c0e1369f6483f9ec46a722d22f07c as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/467c0e1369f6483f9ec46a722d22f07c 2024-11-07T14:19:40,954 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/467c0e1369f6483f9ec46a722d22f07c, entries=150, sequenceid=271, filesize=12.0 K 2024-11-07T14:19:40,955 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for e3403f2020527e03e67f0f2ab02983ef in 449ms, sequenceid=271, compaction requested=false 2024-11-07T14:19:40,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 
{event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:40,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:40,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-11-07T14:19:40,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-11-07T14:19:40,958 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-11-07T14:19:40,958 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2140 sec 2024-11-07T14:19:40,959 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 1.2170 sec 2024-11-07T14:19:40,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on e3403f2020527e03e67f0f2ab02983ef 2024-11-07T14:19:40,981 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e3403f2020527e03e67f0f2ab02983ef 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-07T14:19:40,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=A 2024-11-07T14:19:40,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:40,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=B 2024-11-07T14:19:40,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:40,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=C 2024-11-07T14:19:40,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:40,985 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/ae976d1862a74b0d94885725ca016f64 is 50, key is test_row_0/A:col10/1730989180875/Put/seqid=0 2024-11-07T14:19:40,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742316_1492 (size=14741) 2024-11-07T14:19:40,990 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/ae976d1862a74b0d94885725ca016f64 2024-11-07T14:19:40,996 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/0ef74470a02d47eb8ccc7ff13141246b is 50, key is test_row_0/B:col10/1730989180875/Put/seqid=0 2024-11-07T14:19:40,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742317_1493 (size=12301) 2024-11-07T14:19:41,000 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/0ef74470a02d47eb8ccc7ff13141246b 2024-11-07T14:19:41,006 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/f1b4027678d340aeb7313d5a995876f9 is 50, key is test_row_0/C:col10/1730989180875/Put/seqid=0 2024-11-07T14:19:41,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742318_1494 (size=12301) 2024-11-07T14:19:41,009 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:41,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989241004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:41,013 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:41,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989241006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:41,014 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:41,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989241010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:41,112 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:41,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989241111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:41,117 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:41,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989241114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:41,117 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:41,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989241114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:41,315 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:41,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989241313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:41,321 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:41,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989241319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:41,321 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:41,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989241319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:41,330 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:41,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989241328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:41,331 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:41,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989241328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:41,410 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/f1b4027678d340aeb7313d5a995876f9 2024-11-07T14:19:41,414 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/ae976d1862a74b0d94885725ca016f64 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/ae976d1862a74b0d94885725ca016f64 2024-11-07T14:19:41,418 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/ae976d1862a74b0d94885725ca016f64, entries=200, sequenceid=289, filesize=14.4 K 2024-11-07T14:19:41,419 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/0ef74470a02d47eb8ccc7ff13141246b as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/0ef74470a02d47eb8ccc7ff13141246b 2024-11-07T14:19:41,422 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/0ef74470a02d47eb8ccc7ff13141246b, entries=150, sequenceid=289, filesize=12.0 K 2024-11-07T14:19:41,424 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/f1b4027678d340aeb7313d5a995876f9 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/f1b4027678d340aeb7313d5a995876f9 2024-11-07T14:19:41,427 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/f1b4027678d340aeb7313d5a995876f9, entries=150, sequenceid=289, filesize=12.0 K 2024-11-07T14:19:41,428 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for e3403f2020527e03e67f0f2ab02983ef in 447ms, sequenceid=289, compaction requested=true 2024-11-07T14:19:41,428 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:41,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3403f2020527e03e67f0f2ab02983ef:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:19:41,428 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:41,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:41,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3403f2020527e03e67f0f2ab02983ef:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:19:41,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:41,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3403f2020527e03e67f0f2ab02983ef:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:19:41,428 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:41,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:41,432 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39807 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:41,432 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:41,432 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): e3403f2020527e03e67f0f2ab02983ef/A is initiating minor compaction (all files) 2024-11-07T14:19:41,432 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e3403f2020527e03e67f0f2ab02983ef/A in TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
2024-11-07T14:19:41,432 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): e3403f2020527e03e67f0f2ab02983ef/B is initiating minor compaction (all files) 2024-11-07T14:19:41,432 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/d8649cd3a3d842a58bd4104dc9a770eb, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/d5ceb58cf9004c6e856d5a4f1b1081f2, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/ae976d1862a74b0d94885725ca016f64] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp, totalSize=38.9 K 2024-11-07T14:19:41,432 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e3403f2020527e03e67f0f2ab02983ef/B in TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:41,433 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/a2a759049e0648eda5471cf2e683fce4, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/14d148abfc37427fb03774cec087a319, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/0ef74470a02d47eb8ccc7ff13141246b] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp, totalSize=36.5 K 2024-11-07T14:19:41,433 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8649cd3a3d842a58bd4104dc9a770eb, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1730989178521 2024-11-07T14:19:41,433 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting a2a759049e0648eda5471cf2e683fce4, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1730989178521 2024-11-07T14:19:41,433 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting d5ceb58cf9004c6e856d5a4f1b1081f2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1730989179181 2024-11-07T14:19:41,433 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting ae976d1862a74b0d94885725ca016f64, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1730989180869 2024-11-07T14:19:41,433 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 14d148abfc37427fb03774cec087a319, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1730989179181 2024-11-07T14:19:41,434 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 0ef74470a02d47eb8ccc7ff13141246b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1730989180871 2024-11-07T14:19:41,440 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3403f2020527e03e67f0f2ab02983ef#A#compaction#419 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:19:41,440 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3403f2020527e03e67f0f2ab02983ef#B#compaction#420 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:19:41,441 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/5374297296794fd59a83623e5bf2d096 is 50, key is test_row_0/A:col10/1730989180875/Put/seqid=0 2024-11-07T14:19:41,441 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/6f8713b12c394296afc497de1d01bced is 50, key is test_row_0/B:col10/1730989180875/Put/seqid=0 2024-11-07T14:19:41,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742319_1495 (size=13017) 2024-11-07T14:19:41,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742320_1496 (size=13017) 2024-11-07T14:19:41,458 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/6f8713b12c394296afc497de1d01bced as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/6f8713b12c394296afc497de1d01bced 2024-11-07T14:19:41,463 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e3403f2020527e03e67f0f2ab02983ef/B of e3403f2020527e03e67f0f2ab02983ef into 6f8713b12c394296afc497de1d01bced(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
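The compaction selections logged above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking", ExploringCompactionPolicy choosing 3 files) are governed by a few store-level settings. A minimal sketch of those keys follows; the keys are real HBase configuration names, but the values shown are illustrative defaults and assumptions, not necessarily what this test run configured:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // "16 blocking" in the selection messages corresponds to hbase.hstore.blockingStoreFiles.
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        // ExploringCompactionPolicy considers between min and max eligible files per minor
        // compaction; the log shows it settling on 3 files, which is the usual minimum.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        System.out.println(conf.getInt("hbase.hstore.blockingStoreFiles", -1));
      }
    }
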
2024-11-07T14:19:41,463 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:41,463 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef., storeName=e3403f2020527e03e67f0f2ab02983ef/B, priority=13, startTime=1730989181428; duration=0sec 2024-11-07T14:19:41,463 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:41,463 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3403f2020527e03e67f0f2ab02983ef:B 2024-11-07T14:19:41,463 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:41,464 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:41,464 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): e3403f2020527e03e67f0f2ab02983ef/C is initiating minor compaction (all files) 2024-11-07T14:19:41,464 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e3403f2020527e03e67f0f2ab02983ef/C in TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:41,465 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/afbc4050872547d0b240623a7b32180f, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/467c0e1369f6483f9ec46a722d22f07c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/f1b4027678d340aeb7313d5a995876f9] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp, totalSize=36.5 K 2024-11-07T14:19:41,465 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting afbc4050872547d0b240623a7b32180f, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1730989178521 2024-11-07T14:19:41,465 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 467c0e1369f6483f9ec46a722d22f07c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1730989179181 2024-11-07T14:19:41,466 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting f1b4027678d340aeb7313d5a995876f9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1730989180871 2024-11-07T14:19:41,475 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
e3403f2020527e03e67f0f2ab02983ef#C#compaction#421 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:19:41,475 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/5f9f4770aacd4d3890f170847101f125 is 50, key is test_row_0/C:col10/1730989180875/Put/seqid=0 2024-11-07T14:19:41,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742321_1497 (size=13017) 2024-11-07T14:19:41,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on e3403f2020527e03e67f0f2ab02983ef 2024-11-07T14:19:41,620 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e3403f2020527e03e67f0f2ab02983ef 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-07T14:19:41,620 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=A 2024-11-07T14:19:41,620 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:41,620 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=B 2024-11-07T14:19:41,620 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:41,620 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=C 2024-11-07T14:19:41,620 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:41,630 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/e96d6d67097a4b9698f55f467d67f087 is 50, key is test_row_0/A:col10/1730989181619/Put/seqid=0 2024-11-07T14:19:41,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742322_1498 (size=14741) 2024-11-07T14:19:41,654 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:41,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989241650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:41,655 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:41,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989241651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:41,656 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:41,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989241652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:41,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:41,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989241755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:41,758 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:41,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989241756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:41,759 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:41,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989241757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:41,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-07T14:19:41,846 INFO [Thread-1945 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-11-07T14:19:41,847 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:19:41,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-11-07T14:19:41,849 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:19:41,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-07T14:19:41,849 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:19:41,849 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:19:41,850 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/5374297296794fd59a83623e5bf2d096 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/5374297296794fd59a83623e5bf2d096 2024-11-07T14:19:41,854 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e3403f2020527e03e67f0f2ab02983ef/A of e3403f2020527e03e67f0f2ab02983ef into 5374297296794fd59a83623e5bf2d096(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
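The repeated RegionTooBusyException entries come from writers hitting the region's blocking memstore size, which HRegion derives from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier (512.0 K in this run). A minimal client-side sketch of the write pattern behind these Mutate RPCs is shown below, reusing the row/family/qualifier names visible in the log; the cell value and the explicit retry loop are assumptions for illustration, and in practice the HBase client applies its own retry/backoff and may surface the exception wrapped rather than directly:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriterSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          // Family "A", qualifier "col10" as seen in the flushed HFile keys; value is illustrative.
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);  // issues the ClientService Mutate RPC seen in the log
              break;
            } catch (RegionTooBusyException e) {
              // Region memstore is above the blocking limit; back off before retrying.
              Thread.sleep(100L * (attempt + 1));
            }
          }
        }
      }
    }
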
2024-11-07T14:19:41,854 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:41,854 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef., storeName=e3403f2020527e03e67f0f2ab02983ef/A, priority=13, startTime=1730989181428; duration=0sec 2024-11-07T14:19:41,854 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:41,854 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3403f2020527e03e67f0f2ab02983ef:A 2024-11-07T14:19:41,894 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/5f9f4770aacd4d3890f170847101f125 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/5f9f4770aacd4d3890f170847101f125 2024-11-07T14:19:41,899 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e3403f2020527e03e67f0f2ab02983ef/C of e3403f2020527e03e67f0f2ab02983ef into 5f9f4770aacd4d3890f170847101f125(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:19:41,899 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:41,899 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef., storeName=e3403f2020527e03e67f0f2ab02983ef/C, priority=13, startTime=1730989181428; duration=0sec 2024-11-07T14:19:41,899 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:41,899 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3403f2020527e03e67f0f2ab02983ef:C 2024-11-07T14:19:41,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-07T14:19:41,962 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:41,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989241959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:41,963 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:41,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:41,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989241960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:41,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989241960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:42,001 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:42,001 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-07T14:19:42,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:42,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:42,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:42,002 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:42,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:42,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:42,035 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=310 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/e96d6d67097a4b9698f55f467d67f087 2024-11-07T14:19:42,042 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/33ef4b695066430390a811dca6f90eea is 50, key is test_row_0/B:col10/1730989181619/Put/seqid=0 2024-11-07T14:19:42,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742323_1499 (size=12301) 2024-11-07T14:19:42,046 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=310 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/33ef4b695066430390a811dca6f90eea 2024-11-07T14:19:42,051 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/a1c8d956951547a89ee2c189eca7303c is 50, key is test_row_0/C:col10/1730989181619/Put/seqid=0 2024-11-07T14:19:42,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742324_1500 (size=12301) 2024-11-07T14:19:42,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-07T14:19:42,154 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:42,154 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-07T14:19:42,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:42,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
as already flushing 2024-11-07T14:19:42,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:42,155 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:42,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:42,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:42,267 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:42,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989242264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:42,268 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:42,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989242264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:42,268 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:42,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989242264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:42,307 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:42,307 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-07T14:19:42,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:42,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:42,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:42,308 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:42,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:42,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:42,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-07T14:19:42,455 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=310 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/a1c8d956951547a89ee2c189eca7303c 2024-11-07T14:19:42,459 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/e96d6d67097a4b9698f55f467d67f087 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/e96d6d67097a4b9698f55f467d67f087 2024-11-07T14:19:42,459 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:42,460 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-07T14:19:42,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:42,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:42,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:42,460 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:42,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:42,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:42,463 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/e96d6d67097a4b9698f55f467d67f087, entries=200, sequenceid=310, filesize=14.4 K 2024-11-07T14:19:42,464 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/33ef4b695066430390a811dca6f90eea as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/33ef4b695066430390a811dca6f90eea 2024-11-07T14:19:42,467 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/33ef4b695066430390a811dca6f90eea, entries=150, sequenceid=310, filesize=12.0 K 2024-11-07T14:19:42,467 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/a1c8d956951547a89ee2c189eca7303c as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/a1c8d956951547a89ee2c189eca7303c 2024-11-07T14:19:42,470 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/a1c8d956951547a89ee2c189eca7303c, entries=150, sequenceid=310, filesize=12.0 K 
2024-11-07T14:19:42,471 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for e3403f2020527e03e67f0f2ab02983ef in 851ms, sequenceid=310, compaction requested=false 2024-11-07T14:19:42,471 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:42,614 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:42,614 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-07T14:19:42,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:42,614 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing e3403f2020527e03e67f0f2ab02983ef 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-07T14:19:42,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=A 2024-11-07T14:19:42,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:42,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=B 2024-11-07T14:19:42,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:42,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=C 2024-11-07T14:19:42,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:42,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/4f44dd754e804f03ae26e3749ceb486b is 50, key is test_row_0/A:col10/1730989181651/Put/seqid=0 2024-11-07T14:19:42,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742325_1501 (size=12301) 2024-11-07T14:19:42,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on e3403f2020527e03e67f0f2ab02983ef 2024-11-07T14:19:42,772 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
as already flushing 2024-11-07T14:19:42,827 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:42,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989242823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:42,827 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:42,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989242823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:42,828 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:42,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989242823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:42,930 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:42,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989242928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:42,930 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:42,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989242928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:42,932 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:42,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989242929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:42,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-07T14:19:43,032 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/4f44dd754e804f03ae26e3749ceb486b 2024-11-07T14:19:43,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/2683b20ba68e4d4da92ff6c8ceb90990 is 50, key is test_row_0/B:col10/1730989181651/Put/seqid=0 2024-11-07T14:19:43,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742326_1502 (size=12301) 2024-11-07T14:19:43,137 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:43,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989243131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:43,137 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:43,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989243132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:43,138 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:43,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989243133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:43,336 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:43,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38008 deadline: 1730989243334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:43,337 DEBUG [Thread-1937 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4141 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef., hostname=69430dbfd73f,45917,1730989044081, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T14:19:43,346 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:43,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38078 deadline: 1730989243344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:43,347 DEBUG [Thread-1935 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4156 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef., hostname=69430dbfd73f,45917,1730989044081, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T14:19:43,440 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:43,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989243438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:43,441 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:43,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989243439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:43,442 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:43,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989243440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:43,442 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/2683b20ba68e4d4da92ff6c8ceb90990 2024-11-07T14:19:43,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/b40bee20ddf549cb824b7bb6d6fed819 is 50, key is test_row_0/C:col10/1730989181651/Put/seqid=0 2024-11-07T14:19:43,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742327_1503 (size=12301) 2024-11-07T14:19:43,854 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/b40bee20ddf549cb824b7bb6d6fed819 2024-11-07T14:19:43,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/4f44dd754e804f03ae26e3749ceb486b as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/4f44dd754e804f03ae26e3749ceb486b 2024-11-07T14:19:43,860 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/4f44dd754e804f03ae26e3749ceb486b, entries=150, sequenceid=328, filesize=12.0 K 2024-11-07T14:19:43,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/2683b20ba68e4d4da92ff6c8ceb90990 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/2683b20ba68e4d4da92ff6c8ceb90990 2024-11-07T14:19:43,864 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/2683b20ba68e4d4da92ff6c8ceb90990, entries=150, sequenceid=328, filesize=12.0 K 2024-11-07T14:19:43,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/b40bee20ddf549cb824b7bb6d6fed819 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/b40bee20ddf549cb824b7bb6d6fed819 2024-11-07T14:19:43,868 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/b40bee20ddf549cb824b7bb6d6fed819, entries=150, sequenceid=328, filesize=12.0 K 2024-11-07T14:19:43,869 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for e3403f2020527e03e67f0f2ab02983ef in 1254ms, sequenceid=328, compaction requested=true 2024-11-07T14:19:43,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:43,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
2024-11-07T14:19:43,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-11-07T14:19:43,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-11-07T14:19:43,873 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-11-07T14:19:43,873 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0230 sec 2024-11-07T14:19:43,875 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 2.0280 sec 2024-11-07T14:19:43,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on e3403f2020527e03e67f0f2ab02983ef 2024-11-07T14:19:43,943 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e3403f2020527e03e67f0f2ab02983ef 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-07T14:19:43,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=A 2024-11-07T14:19:43,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:43,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=B 2024-11-07T14:19:43,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:43,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=C 2024-11-07T14:19:43,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:43,948 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/a7f114ef706a4bbfb0792180dbf526da is 50, key is test_row_0/A:col10/1730989182794/Put/seqid=0 2024-11-07T14:19:43,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742328_1504 (size=14741) 2024-11-07T14:19:43,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-07T14:19:43,953 INFO [Thread-1945 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-11-07T14:19:43,954 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:19:43,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-11-07T14:19:43,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-07T14:19:43,956 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:19:43,956 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:19:43,957 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:19:43,969 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:43,969 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:43,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989243965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:43,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989243965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:43,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:43,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989243966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:44,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-07T14:19:44,073 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:44,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989244070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:44,074 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:44,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989244070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:44,075 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:44,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989244072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:44,108 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:44,109 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-07T14:19:44,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:44,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:44,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:44,109 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:44,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:44,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:44,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-07T14:19:44,261 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:44,262 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-07T14:19:44,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:44,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:44,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:44,262 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:44,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:44,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:44,276 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:44,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989244275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:44,279 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:44,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989244276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:44,279 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:44,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989244276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:44,353 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=349 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/a7f114ef706a4bbfb0792180dbf526da 2024-11-07T14:19:44,359 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/e2e057286efd4a49be3e8e441bfd63a4 is 50, key is test_row_0/B:col10/1730989182794/Put/seqid=0 2024-11-07T14:19:44,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742329_1505 (size=12301) 2024-11-07T14:19:44,414 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:44,414 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-07T14:19:44,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:44,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:44,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:44,415 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:44,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:44,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:44,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-07T14:19:44,567 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:44,567 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-07T14:19:44,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:44,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:44,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:44,568 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:44,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:44,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:44,580 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:44,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989244579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:44,585 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:44,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989244581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:44,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:44,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989244582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:44,720 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:44,720 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-07T14:19:44,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:44,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:44,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:44,721 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:44,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:44,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:44,763 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=349 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/e2e057286efd4a49be3e8e441bfd63a4 2024-11-07T14:19:44,770 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/9152b8fa241441b094329137647b8973 is 50, key is test_row_0/C:col10/1730989182794/Put/seqid=0 2024-11-07T14:19:44,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742330_1506 (size=12301) 2024-11-07T14:19:44,775 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=349 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/9152b8fa241441b094329137647b8973 2024-11-07T14:19:44,778 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/a7f114ef706a4bbfb0792180dbf526da as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/a7f114ef706a4bbfb0792180dbf526da 2024-11-07T14:19:44,781 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/a7f114ef706a4bbfb0792180dbf526da, entries=200, sequenceid=349, filesize=14.4 K 2024-11-07T14:19:44,782 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/e2e057286efd4a49be3e8e441bfd63a4 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/e2e057286efd4a49be3e8e441bfd63a4 2024-11-07T14:19:44,785 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/e2e057286efd4a49be3e8e441bfd63a4, entries=150, sequenceid=349, filesize=12.0 K 
2024-11-07T14:19:44,785 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/9152b8fa241441b094329137647b8973 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/9152b8fa241441b094329137647b8973 2024-11-07T14:19:44,788 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/9152b8fa241441b094329137647b8973, entries=150, sequenceid=349, filesize=12.0 K 2024-11-07T14:19:44,789 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for e3403f2020527e03e67f0f2ab02983ef in 846ms, sequenceid=349, compaction requested=true 2024-11-07T14:19:44,789 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:44,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3403f2020527e03e67f0f2ab02983ef:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:19:44,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:44,789 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:19:44,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3403f2020527e03e67f0f2ab02983ef:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:19:44,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:44,789 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:19:44,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3403f2020527e03e67f0f2ab02983ef:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:19:44,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:44,790 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 54800 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:19:44,790 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): e3403f2020527e03e67f0f2ab02983ef/A is initiating minor compaction (all files) 2024-11-07T14:19:44,790 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e3403f2020527e03e67f0f2ab02983ef/A in 
TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:44,790 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/5374297296794fd59a83623e5bf2d096, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/e96d6d67097a4b9698f55f467d67f087, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/4f44dd754e804f03ae26e3749ceb486b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/a7f114ef706a4bbfb0792180dbf526da] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp, totalSize=53.5 K 2024-11-07T14:19:44,790 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49920 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:19:44,791 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): e3403f2020527e03e67f0f2ab02983ef/B is initiating minor compaction (all files) 2024-11-07T14:19:44,791 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e3403f2020527e03e67f0f2ab02983ef/B in TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
2024-11-07T14:19:44,791 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/6f8713b12c394296afc497de1d01bced, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/33ef4b695066430390a811dca6f90eea, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/2683b20ba68e4d4da92ff6c8ceb90990, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/e2e057286efd4a49be3e8e441bfd63a4] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp, totalSize=48.8 K 2024-11-07T14:19:44,791 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f8713b12c394296afc497de1d01bced, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1730989180871 2024-11-07T14:19:44,791 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5374297296794fd59a83623e5bf2d096, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1730989180871 2024-11-07T14:19:44,791 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 33ef4b695066430390a811dca6f90eea, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1730989181005 2024-11-07T14:19:44,792 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting e96d6d67097a4b9698f55f467d67f087, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1730989181003 2024-11-07T14:19:44,792 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 2683b20ba68e4d4da92ff6c8ceb90990, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1730989181640 2024-11-07T14:19:44,792 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4f44dd754e804f03ae26e3749ceb486b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1730989181640 2024-11-07T14:19:44,792 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting a7f114ef706a4bbfb0792180dbf526da, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=349, earliestPutTs=1730989182792 2024-11-07T14:19:44,792 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting e2e057286efd4a49be3e8e441bfd63a4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=349, earliestPutTs=1730989182794 2024-11-07T14:19:44,799 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3403f2020527e03e67f0f2ab02983ef#B#compaction#432 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:19:44,800 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/fafcce2dcd8f4b19b9c16a853c2c28fb is 50, key is test_row_0/B:col10/1730989182794/Put/seqid=0 2024-11-07T14:19:44,800 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3403f2020527e03e67f0f2ab02983ef#A#compaction#431 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:19:44,801 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/153f664f1a9140be8faa213cc65a3050 is 50, key is test_row_0/A:col10/1730989182794/Put/seqid=0 2024-11-07T14:19:44,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742331_1507 (size=13153) 2024-11-07T14:19:44,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742332_1508 (size=13153) 2024-11-07T14:19:44,809 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/153f664f1a9140be8faa213cc65a3050 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/153f664f1a9140be8faa213cc65a3050 2024-11-07T14:19:44,813 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e3403f2020527e03e67f0f2ab02983ef/A of e3403f2020527e03e67f0f2ab02983ef into 153f664f1a9140be8faa213cc65a3050(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:19:44,813 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:44,813 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef., storeName=e3403f2020527e03e67f0f2ab02983ef/A, priority=12, startTime=1730989184789; duration=0sec 2024-11-07T14:19:44,813 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:44,813 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3403f2020527e03e67f0f2ab02983ef:A 2024-11-07T14:19:44,813 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:19:44,814 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49920 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:19:44,814 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): e3403f2020527e03e67f0f2ab02983ef/C is initiating minor compaction (all files) 2024-11-07T14:19:44,814 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e3403f2020527e03e67f0f2ab02983ef/C in TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:44,814 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/5f9f4770aacd4d3890f170847101f125, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/a1c8d956951547a89ee2c189eca7303c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/b40bee20ddf549cb824b7bb6d6fed819, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/9152b8fa241441b094329137647b8973] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp, totalSize=48.8 K 2024-11-07T14:19:44,814 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5f9f4770aacd4d3890f170847101f125, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1730989180871 2024-11-07T14:19:44,815 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting a1c8d956951547a89ee2c189eca7303c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1730989181005 2024-11-07T14:19:44,815 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting b40bee20ddf549cb824b7bb6d6fed819, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1730989181640 2024-11-07T14:19:44,815 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9152b8fa241441b094329137647b8973, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=349, earliestPutTs=1730989182794 2024-11-07T14:19:44,823 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3403f2020527e03e67f0f2ab02983ef#C#compaction#433 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:19:44,824 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/0dc1f6eb80374d22bd9443086fa0e38c is 50, key is test_row_0/C:col10/1730989182794/Put/seqid=0 2024-11-07T14:19:44,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742333_1509 (size=13153) 2024-11-07T14:19:44,832 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/0dc1f6eb80374d22bd9443086fa0e38c as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/0dc1f6eb80374d22bd9443086fa0e38c 2024-11-07T14:19:44,835 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e3403f2020527e03e67f0f2ab02983ef/C of e3403f2020527e03e67f0f2ab02983ef into 0dc1f6eb80374d22bd9443086fa0e38c(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:19:44,835 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:44,835 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef., storeName=e3403f2020527e03e67f0f2ab02983ef/C, priority=12, startTime=1730989184789; duration=0sec 2024-11-07T14:19:44,835 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:44,836 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3403f2020527e03e67f0f2ab02983ef:C 2024-11-07T14:19:44,873 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:44,873 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-07T14:19:44,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:44,873 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing e3403f2020527e03e67f0f2ab02983ef 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-07T14:19:44,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=A 2024-11-07T14:19:44,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:44,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=B 2024-11-07T14:19:44,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:44,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=C 2024-11-07T14:19:44,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:44,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/52de4681c64d48f5bd134b7f680e3a1e is 50, key is test_row_0/A:col10/1730989183965/Put/seqid=0 2024-11-07T14:19:44,881 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742334_1510 (size=12301) 2024-11-07T14:19:45,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-07T14:19:45,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on e3403f2020527e03e67f0f2ab02983ef 2024-11-07T14:19:45,084 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. as already flushing 2024-11-07T14:19:45,122 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:45,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989245115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:45,124 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:45,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989245120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:45,128 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:45,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989245122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:45,212 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/fafcce2dcd8f4b19b9c16a853c2c28fb as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/fafcce2dcd8f4b19b9c16a853c2c28fb 2024-11-07T14:19:45,216 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e3403f2020527e03e67f0f2ab02983ef/B of e3403f2020527e03e67f0f2ab02983ef into fafcce2dcd8f4b19b9c16a853c2c28fb(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:19:45,216 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:45,216 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef., storeName=e3403f2020527e03e67f0f2ab02983ef/B, priority=12, startTime=1730989184789; duration=0sec 2024-11-07T14:19:45,216 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:45,216 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3403f2020527e03e67f0f2ab02983ef:B 2024-11-07T14:19:45,226 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:45,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989245223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:45,226 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:45,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989245225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:45,232 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:45,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989245229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:45,282 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/52de4681c64d48f5bd134b7f680e3a1e 2024-11-07T14:19:45,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/6dcd40e3706b46379265562d62090898 is 50, key is test_row_0/B:col10/1730989183965/Put/seqid=0 2024-11-07T14:19:45,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742335_1511 (size=12301) 2024-11-07T14:19:45,431 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:45,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989245428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:45,431 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:45,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989245428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:45,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:45,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989245433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:45,692 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/6dcd40e3706b46379265562d62090898 2024-11-07T14:19:45,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/7a59fb7eccf441fa8dd9ab5a439b74f7 is 50, key is test_row_0/C:col10/1730989183965/Put/seqid=0 2024-11-07T14:19:45,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742336_1512 (size=12301) 2024-11-07T14:19:45,737 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:45,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989245734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:45,737 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:45,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989245734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:45,741 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:45,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989245737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:46,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-07T14:19:46,112 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/7a59fb7eccf441fa8dd9ab5a439b74f7 2024-11-07T14:19:46,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/52de4681c64d48f5bd134b7f680e3a1e as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/52de4681c64d48f5bd134b7f680e3a1e 2024-11-07T14:19:46,119 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/52de4681c64d48f5bd134b7f680e3a1e, entries=150, sequenceid=366, filesize=12.0 K 2024-11-07T14:19:46,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/6dcd40e3706b46379265562d62090898 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/6dcd40e3706b46379265562d62090898 2024-11-07T14:19:46,126 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/6dcd40e3706b46379265562d62090898, entries=150, sequenceid=366, filesize=12.0 K 2024-11-07T14:19:46,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/7a59fb7eccf441fa8dd9ab5a439b74f7 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/7a59fb7eccf441fa8dd9ab5a439b74f7 2024-11-07T14:19:46,130 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/7a59fb7eccf441fa8dd9ab5a439b74f7, entries=150, sequenceid=366, filesize=12.0 K 2024-11-07T14:19:46,132 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for e3403f2020527e03e67f0f2ab02983ef in 1259ms, sequenceid=366, compaction requested=false 2024-11-07T14:19:46,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:46,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
2024-11-07T14:19:46,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-11-07T14:19:46,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-11-07T14:19:46,136 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-11-07T14:19:46,136 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1760 sec 2024-11-07T14:19:46,138 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 2.1830 sec 2024-11-07T14:19:46,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on e3403f2020527e03e67f0f2ab02983ef 2024-11-07T14:19:46,242 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e3403f2020527e03e67f0f2ab02983ef 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-07T14:19:46,242 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=A 2024-11-07T14:19:46,242 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:46,242 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=B 2024-11-07T14:19:46,242 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:46,242 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=C 2024-11-07T14:19:46,242 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:46,246 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/64bb699d7a32406e9159ab5176a6752a is 50, key is test_row_0/A:col10/1730989186240/Put/seqid=0 2024-11-07T14:19:46,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742337_1513 (size=14741) 2024-11-07T14:19:46,271 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:46,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989246267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:46,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:46,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989246268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:46,275 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:46,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989246268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:46,374 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:46,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989246372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:46,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:46,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989246376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:46,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:46,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989246376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:46,578 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:46,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989246575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:46,583 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:46,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989246581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:46,585 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:46,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989246581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:46,654 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=389 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/64bb699d7a32406e9159ab5176a6752a 2024-11-07T14:19:46,660 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/f4f8b58939044070b676ce1606bf802a is 50, key is test_row_0/B:col10/1730989186240/Put/seqid=0 2024-11-07T14:19:46,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742338_1514 (size=12301) 2024-11-07T14:19:46,881 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:46,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38032 deadline: 1730989246880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:46,888 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:46,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38046 deadline: 1730989246885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:46,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:46,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38098 deadline: 1730989246888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:47,065 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=389 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/f4f8b58939044070b676ce1606bf802a 2024-11-07T14:19:47,071 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/97f9f6e946b443ee96dc3c14aedb3ce3 is 50, key is test_row_0/C:col10/1730989186240/Put/seqid=0 2024-11-07T14:19:47,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742339_1515 (size=12301) 2024-11-07T14:19:47,078 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=389 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/97f9f6e946b443ee96dc3c14aedb3ce3 2024-11-07T14:19:47,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/64bb699d7a32406e9159ab5176a6752a as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/64bb699d7a32406e9159ab5176a6752a 2024-11-07T14:19:47,086 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/64bb699d7a32406e9159ab5176a6752a, entries=200, sequenceid=389, filesize=14.4 K 2024-11-07T14:19:47,086 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/f4f8b58939044070b676ce1606bf802a as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/f4f8b58939044070b676ce1606bf802a 2024-11-07T14:19:47,090 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/f4f8b58939044070b676ce1606bf802a, entries=150, sequenceid=389, filesize=12.0 K 2024-11-07T14:19:47,091 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/97f9f6e946b443ee96dc3c14aedb3ce3 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/97f9f6e946b443ee96dc3c14aedb3ce3 2024-11-07T14:19:47,095 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/97f9f6e946b443ee96dc3c14aedb3ce3, entries=150, sequenceid=389, filesize=12.0 K 2024-11-07T14:19:47,095 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for e3403f2020527e03e67f0f2ab02983ef in 854ms, sequenceid=389, compaction requested=true 2024-11-07T14:19:47,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:47,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3403f2020527e03e67f0f2ab02983ef:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:19:47,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:47,096 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:47,096 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:47,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3403f2020527e03e67f0f2ab02983ef:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:19:47,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:47,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3403f2020527e03e67f0f2ab02983ef:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:19:47,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:47,097 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:47,097 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40195 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:47,097 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): e3403f2020527e03e67f0f2ab02983ef/A is initiating minor compaction (all files) 2024-11-07T14:19:47,097 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): e3403f2020527e03e67f0f2ab02983ef/B is initiating minor compaction (all files) 2024-11-07T14:19:47,097 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e3403f2020527e03e67f0f2ab02983ef/A in TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:47,097 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e3403f2020527e03e67f0f2ab02983ef/B in TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:47,097 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/153f664f1a9140be8faa213cc65a3050, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/52de4681c64d48f5bd134b7f680e3a1e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/64bb699d7a32406e9159ab5176a6752a] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp, totalSize=39.3 K 2024-11-07T14:19:47,097 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/fafcce2dcd8f4b19b9c16a853c2c28fb, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/6dcd40e3706b46379265562d62090898, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/f4f8b58939044070b676ce1606bf802a] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp, totalSize=36.9 K 2024-11-07T14:19:47,097 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 153f664f1a9140be8faa213cc65a3050, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=349, earliestPutTs=1730989182794 2024-11-07T14:19:47,097 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 
fafcce2dcd8f4b19b9c16a853c2c28fb, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=349, earliestPutTs=1730989182794 2024-11-07T14:19:47,098 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52de4681c64d48f5bd134b7f680e3a1e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1730989183963 2024-11-07T14:19:47,098 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 6dcd40e3706b46379265562d62090898, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1730989183963 2024-11-07T14:19:47,098 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 64bb699d7a32406e9159ab5176a6752a, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=389, earliestPutTs=1730989185109 2024-11-07T14:19:47,098 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting f4f8b58939044070b676ce1606bf802a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=389, earliestPutTs=1730989185109 2024-11-07T14:19:47,101 DEBUG [Thread-1950 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3652e74d to 127.0.0.1:51818 2024-11-07T14:19:47,101 DEBUG [Thread-1950 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:19:47,105 DEBUG [Thread-1946 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7cf40102 to 127.0.0.1:51818 2024-11-07T14:19:47,105 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3403f2020527e03e67f0f2ab02983ef#B#compaction#440 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:19:47,105 DEBUG [Thread-1946 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:19:47,105 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/9f38adb03a74442a8178eab55df92f9f is 50, key is test_row_0/B:col10/1730989186240/Put/seqid=0 2024-11-07T14:19:47,105 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3403f2020527e03e67f0f2ab02983ef#A#compaction#441 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:19:47,106 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/4262c85b2b9f48fdad54252beb5c5b7d is 50, key is test_row_0/A:col10/1730989186240/Put/seqid=0 2024-11-07T14:19:47,106 DEBUG [Thread-1954 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x73d92042 to 127.0.0.1:51818 2024-11-07T14:19:47,106 DEBUG [Thread-1954 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:19:47,107 DEBUG [Thread-1952 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2405c04e to 127.0.0.1:51818 2024-11-07T14:19:47,107 DEBUG [Thread-1952 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:19:47,110 DEBUG [Thread-1948 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x496fe03f to 127.0.0.1:51818 2024-11-07T14:19:47,110 DEBUG [Thread-1948 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:19:47,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742340_1516 (size=13255) 2024-11-07T14:19:47,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742341_1517 (size=13255) 2024-11-07T14:19:47,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on e3403f2020527e03e67f0f2ab02983ef 2024-11-07T14:19:47,344 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e3403f2020527e03e67f0f2ab02983ef 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-07T14:19:47,344 DEBUG [Thread-1937 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0de9f076 to 127.0.0.1:51818 2024-11-07T14:19:47,345 DEBUG [Thread-1937 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:19:47,345 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=A 2024-11-07T14:19:47,345 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:47,345 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=B 2024-11-07T14:19:47,345 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:47,345 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=C 2024-11-07T14:19:47,345 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:47,348 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/b04f77651e4441ad9cb966a6d061b12d is 50, key is test_row_0/A:col10/1730989186261/Put/seqid=0 2024-11-07T14:19:47,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742342_1518 (size=12301) 2024-11-07T14:19:47,384 DEBUG [Thread-1935 {}] 
zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2f142b04 to 127.0.0.1:51818 2024-11-07T14:19:47,384 DEBUG [Thread-1935 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:19:47,386 DEBUG [Thread-1941 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7ed69825 to 127.0.0.1:51818 2024-11-07T14:19:47,387 DEBUG [Thread-1941 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:19:47,393 DEBUG [Thread-1943 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x11193a0c to 127.0.0.1:51818 2024-11-07T14:19:47,393 DEBUG [Thread-1943 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:19:47,395 DEBUG [Thread-1939 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4414259d to 127.0.0.1:51818 2024-11-07T14:19:47,395 DEBUG [Thread-1939 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:19:47,518 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/9f38adb03a74442a8178eab55df92f9f as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/9f38adb03a74442a8178eab55df92f9f 2024-11-07T14:19:47,518 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/4262c85b2b9f48fdad54252beb5c5b7d as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/4262c85b2b9f48fdad54252beb5c5b7d 2024-11-07T14:19:47,522 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e3403f2020527e03e67f0f2ab02983ef/B of e3403f2020527e03e67f0f2ab02983ef into 9f38adb03a74442a8178eab55df92f9f(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:19:47,522 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e3403f2020527e03e67f0f2ab02983ef/A of e3403f2020527e03e67f0f2ab02983ef into 4262c85b2b9f48fdad54252beb5c5b7d(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:19:47,522 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:47,522 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:47,522 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef., storeName=e3403f2020527e03e67f0f2ab02983ef/A, priority=13, startTime=1730989187096; duration=0sec 2024-11-07T14:19:47,522 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef., storeName=e3403f2020527e03e67f0f2ab02983ef/B, priority=13, startTime=1730989187096; duration=0sec 2024-11-07T14:19:47,522 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:47,522 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3403f2020527e03e67f0f2ab02983ef:B 2024-11-07T14:19:47,522 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:47,522 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3403f2020527e03e67f0f2ab02983ef:A 2024-11-07T14:19:47,522 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:47,523 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:47,523 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): e3403f2020527e03e67f0f2ab02983ef/C is initiating minor compaction (all files) 2024-11-07T14:19:47,523 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e3403f2020527e03e67f0f2ab02983ef/C in TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
2024-11-07T14:19:47,523 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/0dc1f6eb80374d22bd9443086fa0e38c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/7a59fb7eccf441fa8dd9ab5a439b74f7, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/97f9f6e946b443ee96dc3c14aedb3ce3] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp, totalSize=36.9 K 2024-11-07T14:19:47,523 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0dc1f6eb80374d22bd9443086fa0e38c, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=349, earliestPutTs=1730989182794 2024-11-07T14:19:47,523 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7a59fb7eccf441fa8dd9ab5a439b74f7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1730989183963 2024-11-07T14:19:47,523 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 97f9f6e946b443ee96dc3c14aedb3ce3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=389, earliestPutTs=1730989185109 2024-11-07T14:19:47,528 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3403f2020527e03e67f0f2ab02983ef#C#compaction#443 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:19:47,528 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/600fdfc958974c28b12e3da0bd5cd945 is 50, key is test_row_0/C:col10/1730989186240/Put/seqid=0 2024-11-07T14:19:47,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742343_1519 (size=13255) 2024-11-07T14:19:47,751 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/b04f77651e4441ad9cb966a6d061b12d 2024-11-07T14:19:47,757 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/abd641446b61418f8ea452c1035ddffc is 50, key is test_row_0/B:col10/1730989186261/Put/seqid=0 2024-11-07T14:19:47,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742344_1520 (size=12301) 2024-11-07T14:19:47,935 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/600fdfc958974c28b12e3da0bd5cd945 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/600fdfc958974c28b12e3da0bd5cd945 2024-11-07T14:19:47,938 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e3403f2020527e03e67f0f2ab02983ef/C of e3403f2020527e03e67f0f2ab02983ef into 600fdfc958974c28b12e3da0bd5cd945(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:19:47,938 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e3403f2020527e03e67f0f2ab02983ef:
2024-11-07T14:19:47,938 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef., storeName=e3403f2020527e03e67f0f2ab02983ef/C, priority=13, startTime=1730989187096; duration=0sec
2024-11-07T14:19:47,938 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-07T14:19:47,938 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3403f2020527e03e67f0f2ab02983ef:C
2024-11-07T14:19:48,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138
2024-11-07T14:19:48,061 INFO [Thread-1945 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed
2024-11-07T14:19:48,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers:
2024-11-07T14:19:48,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 57
2024-11-07T14:19:48,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 62
2024-11-07T14:19:48,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 60
2024-11-07T14:19:48,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 70
2024-11-07T14:19:48,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 72
2024-11-07T14:19:48,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-11-07T14:19:48,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-11-07T14:19:48,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2884
2024-11-07T14:19:48,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8652 rows
2024-11-07T14:19:48,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2897
2024-11-07T14:19:48,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8689 rows
2024-11-07T14:19:48,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2873
2024-11-07T14:19:48,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8619 rows
2024-11-07T14:19:48,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2871
2024-11-07T14:19:48,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8613 rows
2024-11-07T14:19:48,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2869
2024-11-07T14:19:48,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8607 rows
2024-11-07T14:19:48,061 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-11-07T14:19:48,061 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x79b10416 to 127.0.0.1:51818
2024-11-07T14:19:48,061 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-07T14:19:48,063 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-11-07T14:19:48,063 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-11-07T14:19:48,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-11-07T14:19:48,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140
2024-11-07T14:19:48,067 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730989188067"}]},"ts":"1730989188067"}
2024-11-07T14:19:48,068 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-11-07T14:19:48,071 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-11-07T14:19:48,071 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-11-07T14:19:48,072 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=142, ppid=141, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e3403f2020527e03e67f0f2ab02983ef, UNASSIGN}]
2024-11-07T14:19:48,073 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=142, ppid=141, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e3403f2020527e03e67f0f2ab02983ef, UNASSIGN
2024-11-07T14:19:48,073 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=142 updating hbase:meta row=e3403f2020527e03e67f0f2ab02983ef, regionState=CLOSING, regionLocation=69430dbfd73f,45917,1730989044081
2024-11-07T14:19:48,074 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-11-07T14:19:48,074 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; CloseRegionProcedure e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081}]
2024-11-07T14:19:48,160 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/abd641446b61418f8ea452c1035ddffc
2024-11-07T14:19:48,166 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/b72d3b71972749deb697f77f9dc6c41f is 50, key is test_row_0/C:col10/1730989186261/Put/seqid=0
2024-11-07T14:19:48,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140
2024-11-07T14:19:48,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742345_1521 (size=12301)
2024-11-07T14:19:48,225 DEBUG
[RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:48,226 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] handler.UnassignRegionHandler(124): Close e3403f2020527e03e67f0f2ab02983ef 2024-11-07T14:19:48,226 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-07T14:19:48,226 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1681): Closing e3403f2020527e03e67f0f2ab02983ef, disabling compactions & flushes 2024-11-07T14:19:48,226 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:48,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-07T14:19:48,569 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/b72d3b71972749deb697f77f9dc6c41f 2024-11-07T14:19:48,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/b04f77651e4441ad9cb966a6d061b12d as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/b04f77651e4441ad9cb966a6d061b12d 2024-11-07T14:19:48,575 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/b04f77651e4441ad9cb966a6d061b12d, entries=150, sequenceid=405, filesize=12.0 K 2024-11-07T14:19:48,575 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/abd641446b61418f8ea452c1035ddffc as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/abd641446b61418f8ea452c1035ddffc 2024-11-07T14:19:48,578 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/abd641446b61418f8ea452c1035ddffc, entries=150, sequenceid=405, filesize=12.0 K 2024-11-07T14:19:48,578 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/b72d3b71972749deb697f77f9dc6c41f as 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/b72d3b71972749deb697f77f9dc6c41f 2024-11-07T14:19:48,580 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/b72d3b71972749deb697f77f9dc6c41f, entries=150, sequenceid=405, filesize=12.0 K 2024-11-07T14:19:48,581 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=26.84 KB/27480 for e3403f2020527e03e67f0f2ab02983ef in 1237ms, sequenceid=405, compaction requested=false 2024-11-07T14:19:48,581 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:48,581 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:48,581 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 2024-11-07T14:19:48,581 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. after waiting 0 ms 2024-11-07T14:19:48,581 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
2024-11-07T14:19:48,581 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(2837): Flushing e3403f2020527e03e67f0f2ab02983ef 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-07T14:19:48,582 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=A 2024-11-07T14:19:48,582 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:48,582 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=B 2024-11-07T14:19:48,582 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:48,582 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e3403f2020527e03e67f0f2ab02983ef, store=C 2024-11-07T14:19:48,582 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:48,584 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/9cccef076d8b4be2aeadbf63bbed5706 is 50, key is test_row_1/A:col10/1730989187392/Put/seqid=0 2024-11-07T14:19:48,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742346_1522 (size=9857) 2024-11-07T14:19:48,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-07T14:19:48,988 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=415 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/9cccef076d8b4be2aeadbf63bbed5706 2024-11-07T14:19:48,993 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/14c85735e36d41a187deb1861949db64 is 50, key is test_row_1/B:col10/1730989187392/Put/seqid=0 2024-11-07T14:19:48,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742347_1523 (size=9857) 2024-11-07T14:19:49,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-07T14:19:49,396 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 
{event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=415 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/14c85735e36d41a187deb1861949db64 2024-11-07T14:19:49,401 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/b50d895368a14ce1bc06ebd511a140f9 is 50, key is test_row_1/C:col10/1730989187392/Put/seqid=0 2024-11-07T14:19:49,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742348_1524 (size=9857) 2024-11-07T14:19:49,804 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=415 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/b50d895368a14ce1bc06ebd511a140f9 2024-11-07T14:19:49,807 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/A/9cccef076d8b4be2aeadbf63bbed5706 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/9cccef076d8b4be2aeadbf63bbed5706 2024-11-07T14:19:49,810 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/9cccef076d8b4be2aeadbf63bbed5706, entries=100, sequenceid=415, filesize=9.6 K 2024-11-07T14:19:49,810 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/B/14c85735e36d41a187deb1861949db64 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/14c85735e36d41a187deb1861949db64 2024-11-07T14:19:49,812 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/14c85735e36d41a187deb1861949db64, entries=100, sequenceid=415, filesize=9.6 K 2024-11-07T14:19:49,813 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/.tmp/C/b50d895368a14ce1bc06ebd511a140f9 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/b50d895368a14ce1bc06ebd511a140f9 2024-11-07T14:19:49,815 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/b50d895368a14ce1bc06ebd511a140f9, entries=100, sequenceid=415, filesize=9.6 K 2024-11-07T14:19:49,816 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for e3403f2020527e03e67f0f2ab02983ef in 1235ms, sequenceid=415, compaction requested=true 2024-11-07T14:19:49,816 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/644c2342b4b84ddb81aaa9ffda6c4d4b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/437f4f9f15ea46f19cbada4f63e56e3e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/8f954887c1ed4b1c9c7f7777b07a9638, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/00dfc91a29994836993d682d54a9c021, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/3ef192bd07454bcb835c15b2cb6bf595, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/64e29564b8804f68a5ee0e9894da47e8, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/848a425a7d2b44439070f73e37758456, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/d98e86ffb87b40528ddd351f5561a1a9, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/eb8c30b85743472d888ef638e70efc48, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/6dfdda269cdc48f58cd8ec40c84971b5, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/df4ee5c6a8d84cfabb82ac92c24915dd, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/f31d8b40f6184934b7071bc0790be893, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/3a24cc8792ba4421b52bff041d6ebae7, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/f29b2bd5f014400c855ecac472dea5f6, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/cb037c53216046ada7114206ae1d5bfd, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/3bf2185cb5454984ac3383ef1c9a904d, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/3114c0f3f6f6409d93c7ee2a8950180d, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/bf610861233c42f8a2890aa3849ee10a, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/d8649cd3a3d842a58bd4104dc9a770eb, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/d5ceb58cf9004c6e856d5a4f1b1081f2, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/ae976d1862a74b0d94885725ca016f64, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/5374297296794fd59a83623e5bf2d096, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/e96d6d67097a4b9698f55f467d67f087, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/4f44dd754e804f03ae26e3749ceb486b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/a7f114ef706a4bbfb0792180dbf526da, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/153f664f1a9140be8faa213cc65a3050, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/52de4681c64d48f5bd134b7f680e3a1e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/64bb699d7a32406e9159ab5176a6752a] to archive 2024-11-07T14:19:49,817 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-07T14:19:49,818 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/644c2342b4b84ddb81aaa9ffda6c4d4b to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/644c2342b4b84ddb81aaa9ffda6c4d4b 2024-11-07T14:19:49,819 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/437f4f9f15ea46f19cbada4f63e56e3e to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/437f4f9f15ea46f19cbada4f63e56e3e 2024-11-07T14:19:49,820 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/8f954887c1ed4b1c9c7f7777b07a9638 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/8f954887c1ed4b1c9c7f7777b07a9638 2024-11-07T14:19:49,821 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/00dfc91a29994836993d682d54a9c021 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/00dfc91a29994836993d682d54a9c021 2024-11-07T14:19:49,822 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/3ef192bd07454bcb835c15b2cb6bf595 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/3ef192bd07454bcb835c15b2cb6bf595 2024-11-07T14:19:49,823 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/64e29564b8804f68a5ee0e9894da47e8 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/64e29564b8804f68a5ee0e9894da47e8 2024-11-07T14:19:49,823 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/848a425a7d2b44439070f73e37758456 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/848a425a7d2b44439070f73e37758456 2024-11-07T14:19:49,824 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/d98e86ffb87b40528ddd351f5561a1a9 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/d98e86ffb87b40528ddd351f5561a1a9 2024-11-07T14:19:49,825 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/eb8c30b85743472d888ef638e70efc48 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/eb8c30b85743472d888ef638e70efc48 2024-11-07T14:19:49,826 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/6dfdda269cdc48f58cd8ec40c84971b5 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/6dfdda269cdc48f58cd8ec40c84971b5 2024-11-07T14:19:49,827 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/df4ee5c6a8d84cfabb82ac92c24915dd to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/df4ee5c6a8d84cfabb82ac92c24915dd 2024-11-07T14:19:49,828 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/f31d8b40f6184934b7071bc0790be893 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/f31d8b40f6184934b7071bc0790be893 2024-11-07T14:19:49,829 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/3a24cc8792ba4421b52bff041d6ebae7 to 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/3a24cc8792ba4421b52bff041d6ebae7 2024-11-07T14:19:49,830 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/f29b2bd5f014400c855ecac472dea5f6 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/f29b2bd5f014400c855ecac472dea5f6 2024-11-07T14:19:49,831 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/cb037c53216046ada7114206ae1d5bfd to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/cb037c53216046ada7114206ae1d5bfd 2024-11-07T14:19:49,831 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/3bf2185cb5454984ac3383ef1c9a904d to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/3bf2185cb5454984ac3383ef1c9a904d 2024-11-07T14:19:49,832 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/3114c0f3f6f6409d93c7ee2a8950180d to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/3114c0f3f6f6409d93c7ee2a8950180d 2024-11-07T14:19:49,833 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/bf610861233c42f8a2890aa3849ee10a to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/bf610861233c42f8a2890aa3849ee10a 2024-11-07T14:19:49,834 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/d8649cd3a3d842a58bd4104dc9a770eb to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/d8649cd3a3d842a58bd4104dc9a770eb 2024-11-07T14:19:49,835 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/d5ceb58cf9004c6e856d5a4f1b1081f2 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/d5ceb58cf9004c6e856d5a4f1b1081f2 2024-11-07T14:19:49,835 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/ae976d1862a74b0d94885725ca016f64 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/ae976d1862a74b0d94885725ca016f64 2024-11-07T14:19:49,836 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/5374297296794fd59a83623e5bf2d096 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/5374297296794fd59a83623e5bf2d096 2024-11-07T14:19:49,837 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/e96d6d67097a4b9698f55f467d67f087 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/e96d6d67097a4b9698f55f467d67f087 2024-11-07T14:19:49,838 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/4f44dd754e804f03ae26e3749ceb486b to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/4f44dd754e804f03ae26e3749ceb486b 2024-11-07T14:19:49,839 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/a7f114ef706a4bbfb0792180dbf526da to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/a7f114ef706a4bbfb0792180dbf526da 2024-11-07T14:19:49,839 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/153f664f1a9140be8faa213cc65a3050 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/153f664f1a9140be8faa213cc65a3050 2024-11-07T14:19:49,840 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/52de4681c64d48f5bd134b7f680e3a1e to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/52de4681c64d48f5bd134b7f680e3a1e 2024-11-07T14:19:49,841 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/64bb699d7a32406e9159ab5176a6752a to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/64bb699d7a32406e9159ab5176a6752a 2024-11-07T14:19:49,842 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/67b2d1ce91914642bf7a2e7255b3731b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/dbaee0ed0c1f47c0b4b1397909cfad86, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/d7f2fe32a3734067be22d5b1c1e8ebd8, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/e69d39654e6d4b948a7a7e77c2cf1a15, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/d78f473dba654e9ea05c55e23ea2a1d6, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/191bc0c402e4481a924c682eb58d42ac, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/cd620be867c448cf80e85e94e8d86e63, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/1c99310d49254c4aaf7ae2baa125971b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/df30f0ed34944201bccb8611f5ed8d36, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/3292ef3e41f841d3be4d78340a048711, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/2163488c242b44f1899007c8510321bd, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/2be2aad5f25443a89e592cdc7a2cb759, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/29273600e847495e8d4ef34411c2c9b6, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/741dc0fe4e4f46358cbf9dfc12604c4f, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/ee031eafa2a24627bcf2f2e08c92dc3c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/76a54747240046d3bc52ee77a08d273c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/5bad309892264f74847d9fe097e68205, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/a2a759049e0648eda5471cf2e683fce4, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/4f5c856585104e328f92b369bcb441db, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/14d148abfc37427fb03774cec087a319, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/6f8713b12c394296afc497de1d01bced, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/0ef74470a02d47eb8ccc7ff13141246b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/33ef4b695066430390a811dca6f90eea, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/2683b20ba68e4d4da92ff6c8ceb90990, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/fafcce2dcd8f4b19b9c16a853c2c28fb, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/e2e057286efd4a49be3e8e441bfd63a4, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/6dcd40e3706b46379265562d62090898, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/f4f8b58939044070b676ce1606bf802a] to archive 2024-11-07T14:19:49,843 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-07T14:19:49,844 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/67b2d1ce91914642bf7a2e7255b3731b to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/67b2d1ce91914642bf7a2e7255b3731b 2024-11-07T14:19:49,845 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/dbaee0ed0c1f47c0b4b1397909cfad86 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/dbaee0ed0c1f47c0b4b1397909cfad86 2024-11-07T14:19:49,846 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/d7f2fe32a3734067be22d5b1c1e8ebd8 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/d7f2fe32a3734067be22d5b1c1e8ebd8 2024-11-07T14:19:49,846 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/e69d39654e6d4b948a7a7e77c2cf1a15 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/e69d39654e6d4b948a7a7e77c2cf1a15 2024-11-07T14:19:49,847 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/d78f473dba654e9ea05c55e23ea2a1d6 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/d78f473dba654e9ea05c55e23ea2a1d6 2024-11-07T14:19:49,848 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/191bc0c402e4481a924c682eb58d42ac to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/191bc0c402e4481a924c682eb58d42ac 2024-11-07T14:19:49,849 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/cd620be867c448cf80e85e94e8d86e63 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/cd620be867c448cf80e85e94e8d86e63 2024-11-07T14:19:49,849 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/1c99310d49254c4aaf7ae2baa125971b to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/1c99310d49254c4aaf7ae2baa125971b 2024-11-07T14:19:49,850 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/df30f0ed34944201bccb8611f5ed8d36 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/df30f0ed34944201bccb8611f5ed8d36 2024-11-07T14:19:49,851 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/3292ef3e41f841d3be4d78340a048711 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/3292ef3e41f841d3be4d78340a048711 2024-11-07T14:19:49,852 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/2163488c242b44f1899007c8510321bd to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/2163488c242b44f1899007c8510321bd 2024-11-07T14:19:49,852 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/2be2aad5f25443a89e592cdc7a2cb759 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/2be2aad5f25443a89e592cdc7a2cb759 2024-11-07T14:19:49,853 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/29273600e847495e8d4ef34411c2c9b6 to 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/29273600e847495e8d4ef34411c2c9b6 2024-11-07T14:19:49,854 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/741dc0fe4e4f46358cbf9dfc12604c4f to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/741dc0fe4e4f46358cbf9dfc12604c4f 2024-11-07T14:19:49,855 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/ee031eafa2a24627bcf2f2e08c92dc3c to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/ee031eafa2a24627bcf2f2e08c92dc3c 2024-11-07T14:19:49,855 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/76a54747240046d3bc52ee77a08d273c to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/76a54747240046d3bc52ee77a08d273c 2024-11-07T14:19:49,856 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/5bad309892264f74847d9fe097e68205 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/5bad309892264f74847d9fe097e68205 2024-11-07T14:19:49,857 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/a2a759049e0648eda5471cf2e683fce4 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/a2a759049e0648eda5471cf2e683fce4 2024-11-07T14:19:49,858 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/4f5c856585104e328f92b369bcb441db to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/4f5c856585104e328f92b369bcb441db 2024-11-07T14:19:49,859 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/14d148abfc37427fb03774cec087a319 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/14d148abfc37427fb03774cec087a319 2024-11-07T14:19:49,859 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/6f8713b12c394296afc497de1d01bced to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/6f8713b12c394296afc497de1d01bced 2024-11-07T14:19:49,860 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/0ef74470a02d47eb8ccc7ff13141246b to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/0ef74470a02d47eb8ccc7ff13141246b 2024-11-07T14:19:49,861 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/33ef4b695066430390a811dca6f90eea to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/33ef4b695066430390a811dca6f90eea 2024-11-07T14:19:49,862 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/2683b20ba68e4d4da92ff6c8ceb90990 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/2683b20ba68e4d4da92ff6c8ceb90990 2024-11-07T14:19:49,863 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/fafcce2dcd8f4b19b9c16a853c2c28fb to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/fafcce2dcd8f4b19b9c16a853c2c28fb 2024-11-07T14:19:49,863 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/e2e057286efd4a49be3e8e441bfd63a4 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/e2e057286efd4a49be3e8e441bfd63a4 2024-11-07T14:19:49,864 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/6dcd40e3706b46379265562d62090898 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/6dcd40e3706b46379265562d62090898 2024-11-07T14:19:49,865 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/f4f8b58939044070b676ce1606bf802a to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/f4f8b58939044070b676ce1606bf802a 2024-11-07T14:19:49,866 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/ad58e06505464133b59e9f2ada400bf9, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/112a1009afe9402ab729d1749f68fb4a, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/27add678d4004869b0659bbf609606ec, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/7698920be86648ecaf303d4197e801eb, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/e64bd457c1fb4b3cafb395f83acb9a82, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/3a3be7d0a5f940729f395ff6e58ff9d0, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/1f57ee2e1f3b4b1499764a0632b73757, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/7c9f4c586c704bab9a69815bb1ab83f0, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/d57ba986568348e7b05b1fcf475f2a17, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/8eb6617a6b52432f81fcebee0e0a3f97, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/cf45816e0aa94af9b74f20c61ee22ecc, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/b9b38c0da2564b6aadb182a4ef594d88, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/3c3da67daa014004aa8ca1bdc273deb6, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/ccd0e2f043ab49cc85e927f37244bd82, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/2fd16b67d9c4483998ebe570f93b1e97, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/96264274b1b748f38aab09c126ac4d0b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/707368059562464f96a27473cfd6eb47, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/afbc4050872547d0b240623a7b32180f, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/8fe96c17435045d1aa594859d0a273b8, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/467c0e1369f6483f9ec46a722d22f07c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/5f9f4770aacd4d3890f170847101f125, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/f1b4027678d340aeb7313d5a995876f9, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/a1c8d956951547a89ee2c189eca7303c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/b40bee20ddf549cb824b7bb6d6fed819, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/0dc1f6eb80374d22bd9443086fa0e38c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/9152b8fa241441b094329137647b8973, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/7a59fb7eccf441fa8dd9ab5a439b74f7, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/97f9f6e946b443ee96dc3c14aedb3ce3] to archive 2024-11-07T14:19:49,867 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-07T14:19:49,868 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/ad58e06505464133b59e9f2ada400bf9 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/ad58e06505464133b59e9f2ada400bf9 2024-11-07T14:19:49,869 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/112a1009afe9402ab729d1749f68fb4a to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/112a1009afe9402ab729d1749f68fb4a 2024-11-07T14:19:49,870 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/27add678d4004869b0659bbf609606ec to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/27add678d4004869b0659bbf609606ec 2024-11-07T14:19:49,870 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/7698920be86648ecaf303d4197e801eb to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/7698920be86648ecaf303d4197e801eb 2024-11-07T14:19:49,871 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/e64bd457c1fb4b3cafb395f83acb9a82 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/e64bd457c1fb4b3cafb395f83acb9a82 2024-11-07T14:19:49,872 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/3a3be7d0a5f940729f395ff6e58ff9d0 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/3a3be7d0a5f940729f395ff6e58ff9d0 2024-11-07T14:19:49,873 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/1f57ee2e1f3b4b1499764a0632b73757 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/1f57ee2e1f3b4b1499764a0632b73757 2024-11-07T14:19:49,874 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/7c9f4c586c704bab9a69815bb1ab83f0 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/7c9f4c586c704bab9a69815bb1ab83f0 2024-11-07T14:19:49,874 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/d57ba986568348e7b05b1fcf475f2a17 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/d57ba986568348e7b05b1fcf475f2a17 2024-11-07T14:19:49,875 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/8eb6617a6b52432f81fcebee0e0a3f97 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/8eb6617a6b52432f81fcebee0e0a3f97 2024-11-07T14:19:49,876 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/cf45816e0aa94af9b74f20c61ee22ecc to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/cf45816e0aa94af9b74f20c61ee22ecc 2024-11-07T14:19:49,877 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/b9b38c0da2564b6aadb182a4ef594d88 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/b9b38c0da2564b6aadb182a4ef594d88 2024-11-07T14:19:49,877 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/3c3da67daa014004aa8ca1bdc273deb6 to 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/3c3da67daa014004aa8ca1bdc273deb6 2024-11-07T14:19:49,878 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/ccd0e2f043ab49cc85e927f37244bd82 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/ccd0e2f043ab49cc85e927f37244bd82 2024-11-07T14:19:49,879 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/2fd16b67d9c4483998ebe570f93b1e97 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/2fd16b67d9c4483998ebe570f93b1e97 2024-11-07T14:19:49,880 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/96264274b1b748f38aab09c126ac4d0b to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/96264274b1b748f38aab09c126ac4d0b 2024-11-07T14:19:49,881 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/707368059562464f96a27473cfd6eb47 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/707368059562464f96a27473cfd6eb47 2024-11-07T14:19:49,881 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/afbc4050872547d0b240623a7b32180f to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/afbc4050872547d0b240623a7b32180f 2024-11-07T14:19:49,882 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/8fe96c17435045d1aa594859d0a273b8 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/8fe96c17435045d1aa594859d0a273b8 2024-11-07T14:19:49,883 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/467c0e1369f6483f9ec46a722d22f07c to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/467c0e1369f6483f9ec46a722d22f07c 2024-11-07T14:19:49,883 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/5f9f4770aacd4d3890f170847101f125 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/5f9f4770aacd4d3890f170847101f125 2024-11-07T14:19:49,884 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/f1b4027678d340aeb7313d5a995876f9 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/f1b4027678d340aeb7313d5a995876f9 2024-11-07T14:19:49,885 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/a1c8d956951547a89ee2c189eca7303c to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/a1c8d956951547a89ee2c189eca7303c 2024-11-07T14:19:49,886 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/b40bee20ddf549cb824b7bb6d6fed819 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/b40bee20ddf549cb824b7bb6d6fed819 2024-11-07T14:19:49,886 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/0dc1f6eb80374d22bd9443086fa0e38c to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/0dc1f6eb80374d22bd9443086fa0e38c 2024-11-07T14:19:49,887 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/9152b8fa241441b094329137647b8973 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/9152b8fa241441b094329137647b8973 2024-11-07T14:19:49,888 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/7a59fb7eccf441fa8dd9ab5a439b74f7 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/7a59fb7eccf441fa8dd9ab5a439b74f7 2024-11-07T14:19:49,889 DEBUG [StoreCloser-TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/97f9f6e946b443ee96dc3c14aedb3ce3 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/97f9f6e946b443ee96dc3c14aedb3ce3 2024-11-07T14:19:49,892 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/recovered.edits/418.seqid, newMaxSeqId=418, maxSeqId=1 2024-11-07T14:19:49,893 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef. 
2024-11-07T14:19:49,893 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1635): Region close journal for e3403f2020527e03e67f0f2ab02983ef: 2024-11-07T14:19:49,894 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] handler.UnassignRegionHandler(170): Closed e3403f2020527e03e67f0f2ab02983ef 2024-11-07T14:19:49,894 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=142 updating hbase:meta row=e3403f2020527e03e67f0f2ab02983ef, regionState=CLOSED 2024-11-07T14:19:49,896 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-11-07T14:19:49,896 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; CloseRegionProcedure e3403f2020527e03e67f0f2ab02983ef, server=69430dbfd73f,45917,1730989044081 in 1.8210 sec 2024-11-07T14:19:49,897 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=142, resume processing ppid=141 2024-11-07T14:19:49,897 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, ppid=141, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=e3403f2020527e03e67f0f2ab02983ef, UNASSIGN in 1.8240 sec 2024-11-07T14:19:49,898 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-11-07T14:19:49,898 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8260 sec 2024-11-07T14:19:49,899 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730989189899"}]},"ts":"1730989189899"} 2024-11-07T14:19:49,900 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-07T14:19:49,903 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-07T14:19:49,903 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8400 sec 2024-11-07T14:19:50,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-07T14:19:50,170 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-11-07T14:19:50,171 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-07T14:19:50,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:19:50,172 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=144, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:19:50,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-07T14:19:50,172 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=144, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:19:50,174 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef 2024-11-07T14:19:50,176 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A, FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B, FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C, FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/recovered.edits] 2024-11-07T14:19:50,178 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/4262c85b2b9f48fdad54252beb5c5b7d to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/4262c85b2b9f48fdad54252beb5c5b7d 2024-11-07T14:19:50,179 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/9cccef076d8b4be2aeadbf63bbed5706 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/9cccef076d8b4be2aeadbf63bbed5706 2024-11-07T14:19:50,179 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/b04f77651e4441ad9cb966a6d061b12d to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/A/b04f77651e4441ad9cb966a6d061b12d 2024-11-07T14:19:50,181 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/14c85735e36d41a187deb1861949db64 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/14c85735e36d41a187deb1861949db64 2024-11-07T14:19:50,182 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/9f38adb03a74442a8178eab55df92f9f to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/9f38adb03a74442a8178eab55df92f9f 
2024-11-07T14:19:50,183 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/abd641446b61418f8ea452c1035ddffc to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/B/abd641446b61418f8ea452c1035ddffc 2024-11-07T14:19:50,184 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/600fdfc958974c28b12e3da0bd5cd945 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/600fdfc958974c28b12e3da0bd5cd945 2024-11-07T14:19:50,185 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/b50d895368a14ce1bc06ebd511a140f9 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/b50d895368a14ce1bc06ebd511a140f9 2024-11-07T14:19:50,186 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/b72d3b71972749deb697f77f9dc6c41f to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/C/b72d3b71972749deb697f77f9dc6c41f 2024-11-07T14:19:50,188 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/recovered.edits/418.seqid to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef/recovered.edits/418.seqid 2024-11-07T14:19:50,188 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/e3403f2020527e03e67f0f2ab02983ef 2024-11-07T14:19:50,188 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-07T14:19:50,190 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=144, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:19:50,191 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-07T14:19:50,193 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 
2024-11-07T14:19:50,194 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=144, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:19:50,194 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-07T14:19:50,194 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1730989190194"}]},"ts":"9223372036854775807"} 2024-11-07T14:19:50,195 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-07T14:19:50,195 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => e3403f2020527e03e67f0f2ab02983ef, NAME => 'TestAcidGuarantees,,1730989164928.e3403f2020527e03e67f0f2ab02983ef.', STARTKEY => '', ENDKEY => ''}] 2024-11-07T14:19:50,195 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-07T14:19:50,195 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1730989190195"}]},"ts":"9223372036854775807"} 2024-11-07T14:19:50,197 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-07T14:19:50,199 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=144, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:19:50,199 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 28 msec 2024-11-07T14:19:50,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-07T14:19:50,273 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-11-07T14:19:50,282 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testScanAtomicity Thread=238 (was 240), OpenFileDescriptor=451 (was 461), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=411 (was 412), ProcessCount=11 (was 11), AvailableMemoryMB=5653 (was 5718) 2024-11-07T14:19:50,291 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobGetAtomicity Thread=238, OpenFileDescriptor=451, MaxFileDescriptor=1048576, SystemLoadAverage=411, ProcessCount=11, AvailableMemoryMB=5653 2024-11-07T14:19:50,292 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-07T14:19:50,292 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T14:19:50,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=145, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-07T14:19:50,294 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-07T14:19:50,294 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:50,294 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 145 2024-11-07T14:19:50,294 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-07T14:19:50,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-11-07T14:19:50,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742349_1525 (size=960) 2024-11-07T14:19:50,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-11-07T14:19:50,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-11-07T14:19:50,701 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8 2024-11-07T14:19:50,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742350_1526 (size=53) 2024-11-07T14:19:50,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-11-07T14:19:51,105 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T14:19:51,106 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 13fd071fb15e0e486dd456286374cf34, disabling compactions & flushes 2024-11-07T14:19:51,106 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:51,106 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:51,106 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. after waiting 0 ms 2024-11-07T14:19:51,106 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:51,106 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 
2024-11-07T14:19:51,106 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:19:51,107 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-07T14:19:51,107 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1730989191107"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730989191107"}]},"ts":"1730989191107"} 2024-11-07T14:19:51,108 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-07T14:19:51,108 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-07T14:19:51,108 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730989191108"}]},"ts":"1730989191108"} 2024-11-07T14:19:51,109 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-07T14:19:51,113 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=13fd071fb15e0e486dd456286374cf34, ASSIGN}] 2024-11-07T14:19:51,114 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=13fd071fb15e0e486dd456286374cf34, ASSIGN 2024-11-07T14:19:51,114 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=13fd071fb15e0e486dd456286374cf34, ASSIGN; state=OFFLINE, location=69430dbfd73f,45917,1730989044081; forceNewPlan=false, retain=false 2024-11-07T14:19:51,265 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=13fd071fb15e0e486dd456286374cf34, regionState=OPENING, regionLocation=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:51,266 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; OpenRegionProcedure 13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081}] 2024-11-07T14:19:51,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-11-07T14:19:51,417 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:51,419 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 
2024-11-07T14:19:51,419 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7285): Opening region: {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} 2024-11-07T14:19:51,420 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:19:51,420 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T14:19:51,420 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7327): checking encryption for 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:19:51,420 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7330): checking classloading for 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:19:51,421 INFO [StoreOpener-13fd071fb15e0e486dd456286374cf34-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:19:51,422 INFO [StoreOpener-13fd071fb15e0e486dd456286374cf34-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T14:19:51,422 INFO [StoreOpener-13fd071fb15e0e486dd456286374cf34-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 13fd071fb15e0e486dd456286374cf34 columnFamilyName A 2024-11-07T14:19:51,422 DEBUG [StoreOpener-13fd071fb15e0e486dd456286374cf34-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:51,423 INFO [StoreOpener-13fd071fb15e0e486dd456286374cf34-1 {}] regionserver.HStore(327): Store=13fd071fb15e0e486dd456286374cf34/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T14:19:51,423 INFO [StoreOpener-13fd071fb15e0e486dd456286374cf34-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:19:51,423 INFO [StoreOpener-13fd071fb15e0e486dd456286374cf34-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T14:19:51,424 INFO [StoreOpener-13fd071fb15e0e486dd456286374cf34-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 13fd071fb15e0e486dd456286374cf34 columnFamilyName B 2024-11-07T14:19:51,424 DEBUG [StoreOpener-13fd071fb15e0e486dd456286374cf34-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:51,424 INFO [StoreOpener-13fd071fb15e0e486dd456286374cf34-1 {}] regionserver.HStore(327): Store=13fd071fb15e0e486dd456286374cf34/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T14:19:51,424 INFO [StoreOpener-13fd071fb15e0e486dd456286374cf34-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:19:51,425 INFO [StoreOpener-13fd071fb15e0e486dd456286374cf34-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T14:19:51,425 INFO [StoreOpener-13fd071fb15e0e486dd456286374cf34-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 13fd071fb15e0e486dd456286374cf34 columnFamilyName C 2024-11-07T14:19:51,425 DEBUG [StoreOpener-13fd071fb15e0e486dd456286374cf34-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:51,425 INFO [StoreOpener-13fd071fb15e0e486dd456286374cf34-1 {}] regionserver.HStore(327): Store=13fd071fb15e0e486dd456286374cf34/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T14:19:51,425 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:51,426 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34 2024-11-07T14:19:51,426 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34 2024-11-07T14:19:51,427 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T14:19:51,428 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1085): writing seq id for 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:19:51,429 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T14:19:51,430 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1102): Opened 13fd071fb15e0e486dd456286374cf34; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69008110, jitterRate=0.028300970792770386}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T14:19:51,430 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1001): Region open journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:19:51,431 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34., pid=147, masterSystemTime=1730989191417 2024-11-07T14:19:51,432 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:51,432 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 
2024-11-07T14:19:51,432 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=13fd071fb15e0e486dd456286374cf34, regionState=OPEN, openSeqNum=2, regionLocation=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:51,434 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-11-07T14:19:51,434 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; OpenRegionProcedure 13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 in 167 msec 2024-11-07T14:19:51,435 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=146, resume processing ppid=145 2024-11-07T14:19:51,435 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, ppid=145, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=13fd071fb15e0e486dd456286374cf34, ASSIGN in 321 msec 2024-11-07T14:19:51,436 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-07T14:19:51,436 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730989191436"}]},"ts":"1730989191436"} 2024-11-07T14:19:51,436 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-07T14:19:51,439 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-07T14:19:51,439 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1470 sec 2024-11-07T14:19:52,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-11-07T14:19:52,398 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 145 completed 2024-11-07T14:19:52,399 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0644b7e6 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6094c70 2024-11-07T14:19:52,404 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-07T14:19:52,405 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bc9c3e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:19:52,406 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:19:52,407 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34790, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:19:52,408 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-07T14:19:52,409 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45912, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-07T14:19:52,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-07T14:19:52,411 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', 
REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T14:19:52,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-07T14:19:52,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742351_1527 (size=996) 2024-11-07T14:19:52,820 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-07T14:19:52,820 INFO [PEWorker-1 {}] util.FSTableDescriptors(297): Updated tableinfo=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-07T14:19:52,822 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-07T14:19:52,824 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=13fd071fb15e0e486dd456286374cf34, REOPEN/MOVE}] 2024-11-07T14:19:52,824 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=13fd071fb15e0e486dd456286374cf34, REOPEN/MOVE 2024-11-07T14:19:52,824 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=13fd071fb15e0e486dd456286374cf34, regionState=CLOSING, regionLocation=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:52,825 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-07T14:19:52,825 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE; CloseRegionProcedure 13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081}] 2024-11-07T14:19:52,976 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:52,977 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(124): Close 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:19:52,977 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-07T14:19:52,977 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1681): Closing 13fd071fb15e0e486dd456286374cf34, disabling compactions & flushes 2024-11-07T14:19:52,977 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 
2024-11-07T14:19:52,977 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:52,977 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. after waiting 0 ms 2024-11-07T14:19:52,977 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:52,980 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-07T14:19:52,980 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:52,981 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1635): Region close journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:19:52,981 WARN [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegionServer(3786): Not adding moved region record: 13fd071fb15e0e486dd456286374cf34 to self. 2024-11-07T14:19:52,982 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(170): Closed 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:19:52,982 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=13fd071fb15e0e486dd456286374cf34, regionState=CLOSED 2024-11-07T14:19:52,984 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-11-07T14:19:52,984 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; CloseRegionProcedure 13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 in 158 msec 2024-11-07T14:19:52,984 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=13fd071fb15e0e486dd456286374cf34, REOPEN/MOVE; state=CLOSED, location=69430dbfd73f,45917,1730989044081; forceNewPlan=false, retain=true 2024-11-07T14:19:53,134 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=13fd071fb15e0e486dd456286374cf34, regionState=OPENING, regionLocation=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:53,135 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=152, ppid=150, state=RUNNABLE; OpenRegionProcedure 13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081}] 2024-11-07T14:19:53,287 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:53,289 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 
{event_type=M_RS_OPEN_REGION, pid=152}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:53,289 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(7285): Opening region: {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} 2024-11-07T14:19:53,289 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:19:53,289 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T14:19:53,290 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(7327): checking encryption for 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:19:53,290 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(7330): checking classloading for 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:19:53,291 INFO [StoreOpener-13fd071fb15e0e486dd456286374cf34-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:19:53,291 INFO [StoreOpener-13fd071fb15e0e486dd456286374cf34-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T14:19:53,292 INFO [StoreOpener-13fd071fb15e0e486dd456286374cf34-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 13fd071fb15e0e486dd456286374cf34 columnFamilyName A 2024-11-07T14:19:53,292 DEBUG [StoreOpener-13fd071fb15e0e486dd456286374cf34-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:53,293 INFO [StoreOpener-13fd071fb15e0e486dd456286374cf34-1 {}] regionserver.HStore(327): Store=13fd071fb15e0e486dd456286374cf34/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T14:19:53,293 INFO [StoreOpener-13fd071fb15e0e486dd456286374cf34-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:19:53,293 INFO [StoreOpener-13fd071fb15e0e486dd456286374cf34-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T14:19:53,294 INFO [StoreOpener-13fd071fb15e0e486dd456286374cf34-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 13fd071fb15e0e486dd456286374cf34 columnFamilyName B 2024-11-07T14:19:53,294 DEBUG [StoreOpener-13fd071fb15e0e486dd456286374cf34-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:53,294 INFO [StoreOpener-13fd071fb15e0e486dd456286374cf34-1 {}] regionserver.HStore(327): Store=13fd071fb15e0e486dd456286374cf34/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T14:19:53,294 INFO [StoreOpener-13fd071fb15e0e486dd456286374cf34-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:19:53,294 INFO [StoreOpener-13fd071fb15e0e486dd456286374cf34-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-07T14:19:53,294 INFO [StoreOpener-13fd071fb15e0e486dd456286374cf34-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 13fd071fb15e0e486dd456286374cf34 columnFamilyName C 2024-11-07T14:19:53,294 DEBUG [StoreOpener-13fd071fb15e0e486dd456286374cf34-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:53,295 INFO 
[StoreOpener-13fd071fb15e0e486dd456286374cf34-1 {}] regionserver.HStore(327): Store=13fd071fb15e0e486dd456286374cf34/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T14:19:53,295 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:53,295 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34 2024-11-07T14:19:53,296 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34 2024-11-07T14:19:53,297 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T14:19:53,298 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(1085): writing seq id for 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:19:53,298 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(1102): Opened 13fd071fb15e0e486dd456286374cf34; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65204078, jitterRate=-0.028383523225784302}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T14:19:53,299 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(1001): Region open journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:19:53,300 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34., pid=152, masterSystemTime=1730989193286 2024-11-07T14:19:53,301 DEBUG [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:53,301 INFO [RS_OPEN_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 
2024-11-07T14:19:53,301 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=13fd071fb15e0e486dd456286374cf34, regionState=OPEN, openSeqNum=5, regionLocation=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:53,302 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=152, resume processing ppid=150 2024-11-07T14:19:53,303 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, ppid=150, state=SUCCESS; OpenRegionProcedure 13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 in 167 msec 2024-11-07T14:19:53,303 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=150, resume processing ppid=149 2024-11-07T14:19:53,303 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, ppid=149, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=13fd071fb15e0e486dd456286374cf34, REOPEN/MOVE in 480 msec 2024-11-07T14:19:53,305 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-11-07T14:19:53,305 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 482 msec 2024-11-07T14:19:53,306 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 894 msec 2024-11-07T14:19:53,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-07T14:19:53,307 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5c9b5141 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@103dfc6e 2024-11-07T14:19:53,311 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7181df3b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:19:53,311 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x11a52cdf to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e047c09 2024-11-07T14:19:53,314 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11030ef5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:19:53,315 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2d7fe431 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@60d631a3 2024-11-07T14:19:53,317 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69abefea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:19:53,318 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 
0x091d72db to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@58971172 2024-11-07T14:19:53,322 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e757135, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:19:53,322 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5d836f78 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d7fe93b 2024-11-07T14:19:53,326 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7846cb78, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:19:53,326 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x11c440f7 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5f1754bc 2024-11-07T14:19:53,329 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a3b66d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:19:53,329 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x58460ef3 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d9113f3 2024-11-07T14:19:53,334 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cfdf76c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:19:53,335 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6e8cd1ae to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5bb75907 2024-11-07T14:19:53,338 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68c2838a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:19:53,339 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4d832d43 to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c1d3a95 2024-11-07T14:19:53,342 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50bf224f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:19:53,343 DEBUG 
[Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x15b6349f to 127.0.0.1:51818 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@503a7d2e 2024-11-07T14:19:53,345 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79be903c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T14:19:53,353 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:19:53,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=153, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees 2024-11-07T14:19:53,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-07T14:19:53,354 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=153, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:19:53,355 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=153, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:19:53,355 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:19:53,355 DEBUG [hconnection-0x68bc9cc5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:19:53,356 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34806, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:19:53,360 DEBUG [hconnection-0x2c4cbe96-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:19:53,360 DEBUG [hconnection-0x12e6e534-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:19:53,361 DEBUG [hconnection-0x472016f5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:19:53,361 DEBUG [hconnection-0x5fa6746f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:19:53,361 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34808, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:19:53,361 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34814, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:19:53,361 DEBUG [hconnection-0x6ee5ff0a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientService, sasl=false 2024-11-07T14:19:53,362 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34820, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:19:53,362 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34834, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:19:53,362 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34850, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:19:53,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:19:53,365 DEBUG [hconnection-0x1c6b6463-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:19:53,365 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 13fd071fb15e0e486dd456286374cf34 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-07T14:19:53,366 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34854, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:19:53,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=A 2024-11-07T14:19:53,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:53,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=B 2024-11-07T14:19:53,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:53,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=C 2024-11-07T14:19:53,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:53,367 DEBUG [hconnection-0x48689fc9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:19:53,368 DEBUG [hconnection-0x718839c9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:19:53,368 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34870, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:19:53,369 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34886, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:19:53,372 DEBUG [hconnection-0x6a9efee3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T14:19:53,373 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34892, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T14:19:53,383 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:53,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34814 deadline: 1730989253380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:53,384 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:53,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1730989253381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:53,385 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:53,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 3 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989253382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:53,385 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:53,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34834 deadline: 1730989253383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:53,385 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:53,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989253383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:53,399 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110740695ff689e744348c89807754b92ec0_13fd071fb15e0e486dd456286374cf34 is 50, key is test_row_0/A:col10/1730989193364/Put/seqid=0 2024-11-07T14:19:53,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742352_1528 (size=12154) 2024-11-07T14:19:53,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-07T14:19:53,485 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:53,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34814 deadline: 1730989253485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:53,486 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:53,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1730989253485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:53,487 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:53,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989253486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:53,487 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:53,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34834 deadline: 1730989253486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:53,487 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:53,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989253486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:53,506 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:53,507 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-11-07T14:19:53,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:53,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:19:53,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:53,507 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:53,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:53,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:53,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-07T14:19:53,659 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:53,659 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-11-07T14:19:53,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:53,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:19:53,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:53,660 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:53,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:53,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:53,688 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:53,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34814 deadline: 1730989253686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:53,689 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:53,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1730989253688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:53,690 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:53,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989253688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:53,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:53,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989253689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:53,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:53,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34834 deadline: 1730989253689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:53,812 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:53,812 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-11-07T14:19:53,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:53,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:19:53,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:53,813 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:53,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:53,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:53,814 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:53,817 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110740695ff689e744348c89807754b92ec0_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110740695ff689e744348c89807754b92ec0_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:19:53,818 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/a5b28bc8cc1d4e1fa5b5fbf82192fca4, store: [table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:19:53,819 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/a5b28bc8cc1d4e1fa5b5fbf82192fca4 is 175, key is test_row_0/A:col10/1730989193364/Put/seqid=0 2024-11-07T14:19:53,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742353_1529 (size=30955) 2024-11-07T14:19:53,823 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/a5b28bc8cc1d4e1fa5b5fbf82192fca4 2024-11-07T14:19:53,845 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/963dbf1d240c427f84d4663b0bdc6f49 is 50, key is test_row_0/B:col10/1730989193364/Put/seqid=0 2024-11-07T14:19:53,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742354_1530 (size=12001) 2024-11-07T14:19:53,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-07T14:19:53,965 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:53,965 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-11-07T14:19:53,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:53,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:19:53,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:53,966 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:53,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:53,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:53,989 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:53,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34814 deadline: 1730989253989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:53,993 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:53,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34834 deadline: 1730989253992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:53,993 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:53,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1730989253992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:53,993 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:53,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989253993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:53,994 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:53,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989253993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:54,118 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:54,118 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-11-07T14:19:54,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:54,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:19:54,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:54,118 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:54,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:54,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:54,250 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/963dbf1d240c427f84d4663b0bdc6f49 2024-11-07T14:19:54,270 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:54,271 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-11-07T14:19:54,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:54,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:19:54,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:54,271 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:54,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:54,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:54,272 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/c5cb47f8f2014d0892eb29b3952b08de is 50, key is test_row_0/C:col10/1730989193364/Put/seqid=0 2024-11-07T14:19:54,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742355_1531 (size=12001) 2024-11-07T14:19:54,423 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:54,423 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-11-07T14:19:54,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:54,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:19:54,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:54,423 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:54,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:54,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:54,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-07T14:19:54,492 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:54,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34814 deadline: 1730989254492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:54,497 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:54,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34834 deadline: 1730989254497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:54,499 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:54,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1730989254498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:54,499 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:54,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989254498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:54,499 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:54,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989254499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:54,575 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:54,575 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-11-07T14:19:54,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:54,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:19:54,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:54,576 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:54,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:54,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:54,676 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/c5cb47f8f2014d0892eb29b3952b08de 2024-11-07T14:19:54,680 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/a5b28bc8cc1d4e1fa5b5fbf82192fca4 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/a5b28bc8cc1d4e1fa5b5fbf82192fca4 2024-11-07T14:19:54,683 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/a5b28bc8cc1d4e1fa5b5fbf82192fca4, entries=150, sequenceid=16, filesize=30.2 K 2024-11-07T14:19:54,684 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/963dbf1d240c427f84d4663b0bdc6f49 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/963dbf1d240c427f84d4663b0bdc6f49 2024-11-07T14:19:54,687 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/963dbf1d240c427f84d4663b0bdc6f49, entries=150, sequenceid=16, filesize=11.7 K 2024-11-07T14:19:54,687 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/c5cb47f8f2014d0892eb29b3952b08de as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/c5cb47f8f2014d0892eb29b3952b08de 2024-11-07T14:19:54,701 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/c5cb47f8f2014d0892eb29b3952b08de, entries=150, sequenceid=16, filesize=11.7 K 2024-11-07T14:19:54,702 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 13fd071fb15e0e486dd456286374cf34 in 1336ms, sequenceid=16, compaction requested=false 2024-11-07T14:19:54,702 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-07T14:19:54,702 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:19:54,728 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:54,728 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-11-07T14:19:54,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:54,728 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2837): Flushing 13fd071fb15e0e486dd456286374cf34 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-07T14:19:54,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=A 2024-11-07T14:19:54,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:54,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=B 2024-11-07T14:19:54,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:54,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=C 2024-11-07T14:19:54,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:54,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411078ccef2e403a9447995c48576b42d0d51_13fd071fb15e0e486dd456286374cf34 is 50, key is test_row_0/A:col10/1730989193375/Put/seqid=0 2024-11-07T14:19:54,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742356_1532 (size=12154) 2024-11-07T14:19:55,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:55,143 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411078ccef2e403a9447995c48576b42d0d51_13fd071fb15e0e486dd456286374cf34 to 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411078ccef2e403a9447995c48576b42d0d51_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:19:55,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/ab6f38b72f424f9699fd2f8f8dd51122, store: [table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:19:55,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/ab6f38b72f424f9699fd2f8f8dd51122 is 175, key is test_row_0/A:col10/1730989193375/Put/seqid=0 2024-11-07T14:19:55,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742357_1533 (size=30955) 2024-11-07T14:19:55,413 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-07T14:19:55,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-07T14:19:55,500 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:19:55,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:19:55,508 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:55,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34834 deadline: 1730989255506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:55,508 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:55,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34814 deadline: 1730989255506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:55,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:55,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1730989255508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:55,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:55,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989255508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:55,511 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:55,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989255509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:55,549 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/ab6f38b72f424f9699fd2f8f8dd51122 2024-11-07T14:19:55,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/103f8515cc724845a229d208f063cd89 is 50, key is test_row_0/B:col10/1730989193375/Put/seqid=0 2024-11-07T14:19:55,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742358_1534 (size=12001) 2024-11-07T14:19:55,562 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/103f8515cc724845a229d208f063cd89 2024-11-07T14:19:55,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/f225411e1286496ba5df10e256ef24cc is 50, key is 
test_row_0/C:col10/1730989193375/Put/seqid=0 2024-11-07T14:19:55,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742359_1535 (size=12001) 2024-11-07T14:19:55,571 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/f225411e1286496ba5df10e256ef24cc 2024-11-07T14:19:55,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/ab6f38b72f424f9699fd2f8f8dd51122 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/ab6f38b72f424f9699fd2f8f8dd51122 2024-11-07T14:19:55,578 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/ab6f38b72f424f9699fd2f8f8dd51122, entries=150, sequenceid=40, filesize=30.2 K 2024-11-07T14:19:55,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/103f8515cc724845a229d208f063cd89 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/103f8515cc724845a229d208f063cd89 2024-11-07T14:19:55,582 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/103f8515cc724845a229d208f063cd89, entries=150, sequenceid=40, filesize=11.7 K 2024-11-07T14:19:55,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/f225411e1286496ba5df10e256ef24cc as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/f225411e1286496ba5df10e256ef24cc 2024-11-07T14:19:55,586 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/f225411e1286496ba5df10e256ef24cc, entries=150, sequenceid=40, filesize=11.7 K 2024-11-07T14:19:55,586 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 
{event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 13fd071fb15e0e486dd456286374cf34 in 858ms, sequenceid=40, compaction requested=false 2024-11-07T14:19:55,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2538): Flush status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:19:55,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:55,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=154 2024-11-07T14:19:55,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=154 2024-11-07T14:19:55,589 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=153 2024-11-07T14:19:55,589 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=153, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2320 sec 2024-11-07T14:19:55,590 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees in 2.2360 sec 2024-11-07T14:19:55,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:19:55,610 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 13fd071fb15e0e486dd456286374cf34 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-07T14:19:55,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=A 2024-11-07T14:19:55,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:55,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=B 2024-11-07T14:19:55,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:55,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=C 2024-11-07T14:19:55,612 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:55,618 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411075590e512454546ffa7e52b9f3940556d_13fd071fb15e0e486dd456286374cf34 is 50, key is test_row_0/A:col10/1730989195499/Put/seqid=0 2024-11-07T14:19:55,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742360_1536 (size=12154) 2024-11-07T14:19:55,631 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:55,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34834 deadline: 1730989255628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:55,631 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:55,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34814 deadline: 1730989255629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:55,632 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:55,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1730989255630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:55,733 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:55,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34834 deadline: 1730989255732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:55,734 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:55,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34814 deadline: 1730989255732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:55,734 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:55,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1730989255732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:55,935 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:55,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34834 deadline: 1730989255934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:55,935 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:55,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34814 deadline: 1730989255934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:55,936 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:55,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1730989255935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:56,023 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:56,027 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411075590e512454546ffa7e52b9f3940556d_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411075590e512454546ffa7e52b9f3940556d_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:19:56,027 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/d2a737d0f3c64173a040453880c63485, store: [table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:19:56,028 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/d2a737d0f3c64173a040453880c63485 is 175, key is test_row_0/A:col10/1730989195499/Put/seqid=0 2024-11-07T14:19:56,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742361_1537 (size=30955) 2024-11-07T14:19:56,038 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/d2a737d0f3c64173a040453880c63485 2024-11-07T14:19:56,045 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/479c113150ea4f6db5fbf4b168e4a794 is 50, key is test_row_0/B:col10/1730989195499/Put/seqid=0 2024-11-07T14:19:56,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742362_1538 (size=12001) 2024-11-07T14:19:56,239 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:56,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34834 deadline: 1730989256237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:56,239 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:56,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34814 deadline: 1730989256237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:56,239 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:56,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1730989256238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:56,449 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/479c113150ea4f6db5fbf4b168e4a794 2024-11-07T14:19:56,463 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/81be3276ef3b41f5811e79a108d065b6 is 50, key is test_row_0/C:col10/1730989195499/Put/seqid=0 2024-11-07T14:19:56,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742363_1539 (size=12001) 2024-11-07T14:19:56,741 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:56,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34834 deadline: 1730989256740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:56,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:56,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1730989256742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:56,746 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:56,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34814 deadline: 1730989256744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:56,875 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/81be3276ef3b41f5811e79a108d065b6 2024-11-07T14:19:56,879 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/d2a737d0f3c64173a040453880c63485 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/d2a737d0f3c64173a040453880c63485 2024-11-07T14:19:56,882 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/d2a737d0f3c64173a040453880c63485, entries=150, sequenceid=54, filesize=30.2 K 2024-11-07T14:19:56,882 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/479c113150ea4f6db5fbf4b168e4a794 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/479c113150ea4f6db5fbf4b168e4a794 2024-11-07T14:19:56,885 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/479c113150ea4f6db5fbf4b168e4a794, entries=150, sequenceid=54, filesize=11.7 K 2024-11-07T14:19:56,886 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/81be3276ef3b41f5811e79a108d065b6 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/81be3276ef3b41f5811e79a108d065b6 2024-11-07T14:19:56,888 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/81be3276ef3b41f5811e79a108d065b6, entries=150, sequenceid=54, filesize=11.7 K 2024-11-07T14:19:56,889 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 13fd071fb15e0e486dd456286374cf34 in 1279ms, sequenceid=54, compaction requested=true 2024-11-07T14:19:56,889 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:19:56,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 13fd071fb15e0e486dd456286374cf34:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:19:56,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:56,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 13fd071fb15e0e486dd456286374cf34:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:19:56,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:56,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 13fd071fb15e0e486dd456286374cf34:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:19:56,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:56,889 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:56,889 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:56,890 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:56,890 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 13fd071fb15e0e486dd456286374cf34/B is initiating minor compaction (all files) 2024-11-07T14:19:56,890 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 13fd071fb15e0e486dd456286374cf34/B in TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 
2024-11-07T14:19:56,890 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/963dbf1d240c427f84d4663b0bdc6f49, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/103f8515cc724845a229d208f063cd89, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/479c113150ea4f6db5fbf4b168e4a794] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp, totalSize=35.2 K 2024-11-07T14:19:56,891 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:56,891 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): 13fd071fb15e0e486dd456286374cf34/A is initiating minor compaction (all files) 2024-11-07T14:19:56,891 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 963dbf1d240c427f84d4663b0bdc6f49, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1730989193364 2024-11-07T14:19:56,891 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 13fd071fb15e0e486dd456286374cf34/A in TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:56,891 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/a5b28bc8cc1d4e1fa5b5fbf82192fca4, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/ab6f38b72f424f9699fd2f8f8dd51122, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/d2a737d0f3c64173a040453880c63485] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp, totalSize=90.7 K 2024-11-07T14:19:56,891 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:56,891 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 
files: [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/a5b28bc8cc1d4e1fa5b5fbf82192fca4, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/ab6f38b72f424f9699fd2f8f8dd51122, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/d2a737d0f3c64173a040453880c63485] 2024-11-07T14:19:56,891 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 103f8515cc724845a229d208f063cd89, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1730989193375 2024-11-07T14:19:56,891 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting a5b28bc8cc1d4e1fa5b5fbf82192fca4, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1730989193364 2024-11-07T14:19:56,891 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 479c113150ea4f6db5fbf4b168e4a794, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1730989195499 2024-11-07T14:19:56,891 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting ab6f38b72f424f9699fd2f8f8dd51122, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1730989193375 2024-11-07T14:19:56,892 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting d2a737d0f3c64173a040453880c63485, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1730989195499 2024-11-07T14:19:56,897 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:19:56,898 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 13fd071fb15e0e486dd456286374cf34#B#compaction#458 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:19:56,898 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/070a4a959a784efab399cc208e554226 is 50, key is test_row_0/B:col10/1730989195499/Put/seqid=0 2024-11-07T14:19:56,898 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411076f2b02e7b39f4867972e199d66f186f3_13fd071fb15e0e486dd456286374cf34 store=[table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:19:56,900 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411076f2b02e7b39f4867972e199d66f186f3_13fd071fb15e0e486dd456286374cf34, store=[table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:19:56,900 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411076f2b02e7b39f4867972e199d66f186f3_13fd071fb15e0e486dd456286374cf34 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:19:56,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742364_1540 (size=4469) 2024-11-07T14:19:56,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742365_1541 (size=12104) 2024-11-07T14:19:57,305 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 13fd071fb15e0e486dd456286374cf34#A#compaction#459 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:19:57,306 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/62d3ecef3cf14f4a933d0ab3ef27a972 is 175, key is test_row_0/A:col10/1730989195499/Put/seqid=0 2024-11-07T14:19:57,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742366_1542 (size=31058) 2024-11-07T14:19:57,313 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/62d3ecef3cf14f4a933d0ab3ef27a972 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/62d3ecef3cf14f4a933d0ab3ef27a972 2024-11-07T14:19:57,314 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/070a4a959a784efab399cc208e554226 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/070a4a959a784efab399cc208e554226 2024-11-07T14:19:57,317 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 13fd071fb15e0e486dd456286374cf34/A of 13fd071fb15e0e486dd456286374cf34 into 62d3ecef3cf14f4a933d0ab3ef27a972(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:19:57,317 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:19:57,317 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34., storeName=13fd071fb15e0e486dd456286374cf34/A, priority=13, startTime=1730989196889; duration=0sec 2024-11-07T14:19:57,317 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:19:57,317 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 13fd071fb15e0e486dd456286374cf34:A 2024-11-07T14:19:57,318 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:19:57,318 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 13fd071fb15e0e486dd456286374cf34/B of 13fd071fb15e0e486dd456286374cf34 into 070a4a959a784efab399cc208e554226(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:19:57,318 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:19:57,318 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34., storeName=13fd071fb15e0e486dd456286374cf34/B, priority=13, startTime=1730989196889; duration=0sec 2024-11-07T14:19:57,318 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:57,318 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 13fd071fb15e0e486dd456286374cf34:B 2024-11-07T14:19:57,319 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:19:57,319 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): 13fd071fb15e0e486dd456286374cf34/C is initiating minor compaction (all files) 2024-11-07T14:19:57,319 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 13fd071fb15e0e486dd456286374cf34/C in TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:57,319 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/c5cb47f8f2014d0892eb29b3952b08de, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/f225411e1286496ba5df10e256ef24cc, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/81be3276ef3b41f5811e79a108d065b6] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp, totalSize=35.2 K 2024-11-07T14:19:57,319 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting c5cb47f8f2014d0892eb29b3952b08de, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1730989193364 2024-11-07T14:19:57,320 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting f225411e1286496ba5df10e256ef24cc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1730989193375 2024-11-07T14:19:57,320 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 81be3276ef3b41f5811e79a108d065b6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1730989195499 2024-11-07T14:19:57,327 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 13fd071fb15e0e486dd456286374cf34#C#compaction#460 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:19:57,327 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/12be5696c0b94e888774abfacdf907fb is 50, key is test_row_0/C:col10/1730989195499/Put/seqid=0 2024-11-07T14:19:57,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742367_1543 (size=12104) 2024-11-07T14:19:57,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-07T14:19:57,459 INFO [Thread-2359 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 153 completed 2024-11-07T14:19:57,460 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:19:57,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=155, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees 2024-11-07T14:19:57,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-07T14:19:57,461 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=155, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:19:57,462 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=155, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:19:57,462 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:19:57,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:19:57,524 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 13fd071fb15e0e486dd456286374cf34 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-07T14:19:57,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=A 2024-11-07T14:19:57,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:57,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=B 2024-11-07T14:19:57,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:57,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=C 2024-11-07T14:19:57,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-07T14:19:57,531 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107fdde8e3fa60a46d69b265962611145d1_13fd071fb15e0e486dd456286374cf34 is 50, key is test_row_0/A:col10/1730989195627/Put/seqid=0 2024-11-07T14:19:57,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742368_1544 (size=12154) 2024-11-07T14:19:57,540 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:57,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989257538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:57,541 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:57,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989257539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:57,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-07T14:19:57,613 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:57,614 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-07T14:19:57,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:57,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:19:57,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:57,614 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:57,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:57,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:57,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:57,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989257641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:57,643 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:57,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989257642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:57,737 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/12be5696c0b94e888774abfacdf907fb as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/12be5696c0b94e888774abfacdf907fb 2024-11-07T14:19:57,740 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 13fd071fb15e0e486dd456286374cf34/C of 13fd071fb15e0e486dd456286374cf34 into 12be5696c0b94e888774abfacdf907fb(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:19:57,740 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:19:57,741 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34., storeName=13fd071fb15e0e486dd456286374cf34/C, priority=13, startTime=1730989196889; duration=0sec 2024-11-07T14:19:57,741 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:19:57,741 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 13fd071fb15e0e486dd456286374cf34:C 2024-11-07T14:19:57,748 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:57,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1730989257748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:57,751 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:57,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34834 deadline: 1730989257749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:57,755 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:57,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34814 deadline: 1730989257754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:57,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-07T14:19:57,766 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:57,766 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-07T14:19:57,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:57,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:19:57,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:57,767 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:57,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:57,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:57,844 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:57,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989257843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:57,846 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:57,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989257844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:57,918 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:57,919 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-07T14:19:57,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:57,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:19:57,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:57,919 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:57,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:57,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:57,935 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:57,937 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107fdde8e3fa60a46d69b265962611145d1_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107fdde8e3fa60a46d69b265962611145d1_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:19:57,938 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/dec78d320ff24ac0b89321e93e85b404, store: [table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:19:57,939 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/dec78d320ff24ac0b89321e93e85b404 is 175, key is test_row_0/A:col10/1730989195627/Put/seqid=0 2024-11-07T14:19:57,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742369_1545 (size=30955) 2024-11-07T14:19:58,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-07T14:19:58,071 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:58,072 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-07T14:19:58,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:58,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:19:58,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 
2024-11-07T14:19:58,072 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:58,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:58,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:58,139 ERROR [LeaseRenewer:jenkins.hfs.0@localhost:34807 {}] server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins.hfs.0@localhost:34807,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null at org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:58,147 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:58,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989258145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:58,148 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:58,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989258147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:58,224 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:58,225 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-07T14:19:58,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:58,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:19:58,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:58,225 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:19:58,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:58,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:58,343 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/dec78d320ff24ac0b89321e93e85b404 2024-11-07T14:19:58,349 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/4158398b681749e397a178b7ae97b3f3 is 50, key is test_row_0/B:col10/1730989195627/Put/seqid=0 2024-11-07T14:19:58,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742370_1546 (size=12001) 2024-11-07T14:19:58,353 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/4158398b681749e397a178b7ae97b3f3 2024-11-07T14:19:58,358 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/1a3ee5a1da224231a3345d5ce7497c21 is 50, key is test_row_0/C:col10/1730989195627/Put/seqid=0 2024-11-07T14:19:58,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742371_1547 (size=12001) 2024-11-07T14:19:58,363 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/1a3ee5a1da224231a3345d5ce7497c21 2024-11-07T14:19:58,366 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/dec78d320ff24ac0b89321e93e85b404 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/dec78d320ff24ac0b89321e93e85b404 2024-11-07T14:19:58,369 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/dec78d320ff24ac0b89321e93e85b404, entries=150, sequenceid=79, filesize=30.2 K 2024-11-07T14:19:58,370 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/4158398b681749e397a178b7ae97b3f3 as 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/4158398b681749e397a178b7ae97b3f3 2024-11-07T14:19:58,373 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/4158398b681749e397a178b7ae97b3f3, entries=150, sequenceid=79, filesize=11.7 K 2024-11-07T14:19:58,373 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/1a3ee5a1da224231a3345d5ce7497c21 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/1a3ee5a1da224231a3345d5ce7497c21 2024-11-07T14:19:58,377 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:58,377 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/1a3ee5a1da224231a3345d5ce7497c21, entries=150, sequenceid=79, filesize=11.7 K 2024-11-07T14:19:58,377 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-07T14:19:58,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:58,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:19:58,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:58,377 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:58,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:58,378 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 13fd071fb15e0e486dd456286374cf34 in 854ms, sequenceid=79, compaction requested=false 2024-11-07T14:19:58,378 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:19:58,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:19:58,529 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:19:58,530 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-07T14:19:58,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 
2024-11-07T14:19:58,530 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2837): Flushing 13fd071fb15e0e486dd456286374cf34 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-07T14:19:58,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=A 2024-11-07T14:19:58,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:58,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=B 2024-11-07T14:19:58,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:58,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=C 2024-11-07T14:19:58,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:58,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110761411d2e00024e528706c2208bef89a4_13fd071fb15e0e486dd456286374cf34 is 50, key is test_row_0/A:col10/1730989197534/Put/seqid=0 2024-11-07T14:19:58,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742372_1548 (size=12154) 2024-11-07T14:19:58,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-07T14:19:58,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:19:58,652 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:19:58,699 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:58,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989258697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:58,701 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:58,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989258699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:58,802 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:58,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989258800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:58,803 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:58,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989258802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:58,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:19:58,945 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110761411d2e00024e528706c2208bef89a4_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110761411d2e00024e528706c2208bef89a4_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:19:58,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/deb77834d8a447108f547afcbfe092d8, store: [table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:19:58,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/deb77834d8a447108f547afcbfe092d8 is 175, key is test_row_0/A:col10/1730989197534/Put/seqid=0 2024-11-07T14:19:58,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742373_1549 (size=30955) 2024-11-07T14:19:58,951 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=93, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/deb77834d8a447108f547afcbfe092d8 2024-11-07T14:19:58,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/88e8b06ab4fe4a33b66ecb17bc15fe82 is 50, key is test_row_0/B:col10/1730989197534/Put/seqid=0 2024-11-07T14:19:58,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742374_1550 (size=12001) 2024-11-07T14:19:59,005 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:59,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989259004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:59,005 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:59,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989259005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:59,307 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:59,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989259306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:59,308 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:59,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989259307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:59,364 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/88e8b06ab4fe4a33b66ecb17bc15fe82 2024-11-07T14:19:59,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/0e3ec58fe31545fa81d82e8a84028e96 is 50, key is test_row_0/C:col10/1730989197534/Put/seqid=0 2024-11-07T14:19:59,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742375_1551 (size=12001) 2024-11-07T14:19:59,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-07T14:19:59,753 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:59,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1730989259752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:59,753 DEBUG [Thread-2349 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4123 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34., hostname=69430dbfd73f,45917,1730989044081, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T14:19:59,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:59,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34834 deadline: 1730989259764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:59,766 DEBUG [Thread-2357 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4138 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34., hostname=69430dbfd73f,45917,1730989044081, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T14:19:59,768 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:59,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34814 deadline: 1730989259767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:59,769 DEBUG [Thread-2353 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4139 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34., hostname=69430dbfd73f,45917,1730989044081, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T14:19:59,774 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/0e3ec58fe31545fa81d82e8a84028e96 2024-11-07T14:19:59,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/deb77834d8a447108f547afcbfe092d8 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/deb77834d8a447108f547afcbfe092d8 2024-11-07T14:19:59,781 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/deb77834d8a447108f547afcbfe092d8, entries=150, sequenceid=93, filesize=30.2 K 2024-11-07T14:19:59,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/88e8b06ab4fe4a33b66ecb17bc15fe82 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/88e8b06ab4fe4a33b66ecb17bc15fe82 2024-11-07T14:19:59,785 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/88e8b06ab4fe4a33b66ecb17bc15fe82, entries=150, sequenceid=93, filesize=11.7 K 2024-11-07T14:19:59,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/0e3ec58fe31545fa81d82e8a84028e96 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/0e3ec58fe31545fa81d82e8a84028e96 2024-11-07T14:19:59,788 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/0e3ec58fe31545fa81d82e8a84028e96, entries=150, sequenceid=93, filesize=11.7 K 2024-11-07T14:19:59,789 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] 
regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 13fd071fb15e0e486dd456286374cf34 in 1259ms, sequenceid=93, compaction requested=true 2024-11-07T14:19:59,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2538): Flush status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:19:59,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:19:59,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=156 2024-11-07T14:19:59,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=156 2024-11-07T14:19:59,792 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-11-07T14:19:59,792 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3290 sec 2024-11-07T14:19:59,793 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees in 2.3320 sec 2024-11-07T14:19:59,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:19:59,812 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 13fd071fb15e0e486dd456286374cf34 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-07T14:19:59,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=A 2024-11-07T14:19:59,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:59,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=B 2024-11-07T14:19:59,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:59,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=C 2024-11-07T14:19:59,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:19:59,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:59,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989259825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:59,827 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:59,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989259825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:59,833 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107fe2bb2ac58cd4f39a00bb9df0eeef252_13fd071fb15e0e486dd456286374cf34 is 50, key is test_row_0/A:col10/1730989198687/Put/seqid=0 2024-11-07T14:19:59,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742376_1552 (size=12154) 2024-11-07T14:19:59,930 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:59,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989259928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:19:59,930 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:19:59,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989259928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:00,133 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:00,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989260131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:00,133 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:00,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989260132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:00,245 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:00,248 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107fe2bb2ac58cd4f39a00bb9df0eeef252_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107fe2bb2ac58cd4f39a00bb9df0eeef252_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:00,249 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/932a8cfbf8db42f887017f3ef8f000aa, store: [table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:00,250 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/932a8cfbf8db42f887017f3ef8f000aa is 175, key is test_row_0/A:col10/1730989198687/Put/seqid=0 2024-11-07T14:20:00,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742377_1553 (size=30955) 2024-11-07T14:20:00,436 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:00,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989260434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:00,436 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:00,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989260435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:00,654 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=117, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/932a8cfbf8db42f887017f3ef8f000aa 2024-11-07T14:20:00,660 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/65b911bdaffe41919bcf2cc093f8c8d7 is 50, key is test_row_0/B:col10/1730989198687/Put/seqid=0 2024-11-07T14:20:00,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742378_1554 (size=12001) 2024-11-07T14:20:00,942 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:00,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989260940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:00,943 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:00,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989260941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:01,064 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/65b911bdaffe41919bcf2cc093f8c8d7 2024-11-07T14:20:01,070 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/0b41cfeb1a3943789b766950c884fb07 is 50, key is test_row_0/C:col10/1730989198687/Put/seqid=0 2024-11-07T14:20:01,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742379_1555 (size=12001) 2024-11-07T14:20:01,474 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/0b41cfeb1a3943789b766950c884fb07 2024-11-07T14:20:01,478 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/932a8cfbf8db42f887017f3ef8f000aa as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/932a8cfbf8db42f887017f3ef8f000aa 2024-11-07T14:20:01,481 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/932a8cfbf8db42f887017f3ef8f000aa, entries=150, sequenceid=117, filesize=30.2 K 2024-11-07T14:20:01,482 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/65b911bdaffe41919bcf2cc093f8c8d7 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/65b911bdaffe41919bcf2cc093f8c8d7 2024-11-07T14:20:01,485 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/65b911bdaffe41919bcf2cc093f8c8d7, entries=150, sequenceid=117, filesize=11.7 K 2024-11-07T14:20:01,486 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/0b41cfeb1a3943789b766950c884fb07 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/0b41cfeb1a3943789b766950c884fb07 2024-11-07T14:20:01,490 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/0b41cfeb1a3943789b766950c884fb07, entries=150, sequenceid=117, filesize=11.7 K 2024-11-07T14:20:01,490 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 13fd071fb15e0e486dd456286374cf34 in 1678ms, sequenceid=117, compaction requested=true 2024-11-07T14:20:01,491 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:20:01,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 13fd071fb15e0e486dd456286374cf34:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:20:01,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:20:01,491 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:20:01,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 13fd071fb15e0e486dd456286374cf34:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:20:01,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:20:01,491 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:20:01,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 13fd071fb15e0e486dd456286374cf34:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:20:01,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:20:01,492 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 123923 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:20:01,492 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:20:01,492 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 13fd071fb15e0e486dd456286374cf34/B is initiating minor compaction (all files) 2024-11-07T14:20:01,492 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): 13fd071fb15e0e486dd456286374cf34/A is initiating minor compaction (all files) 2024-11-07T14:20:01,492 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 13fd071fb15e0e486dd456286374cf34/B in TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:01,492 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 13fd071fb15e0e486dd456286374cf34/A in TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:01,492 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/070a4a959a784efab399cc208e554226, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/4158398b681749e397a178b7ae97b3f3, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/88e8b06ab4fe4a33b66ecb17bc15fe82, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/65b911bdaffe41919bcf2cc093f8c8d7] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp, totalSize=47.0 K 2024-11-07T14:20:01,492 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/62d3ecef3cf14f4a933d0ab3ef27a972, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/dec78d320ff24ac0b89321e93e85b404, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/deb77834d8a447108f547afcbfe092d8, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/932a8cfbf8db42f887017f3ef8f000aa] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp, totalSize=121.0 K 2024-11-07T14:20:01,492 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 
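
[Aside, not part of the log] The "Exploring compaction algorithm has selected 4 files ... with 3 in ratio" entries above come from HBase's ratio-based minor-compaction selection. As a rough, simplified illustration of the ratio test such a policy applies to a candidate window of store files (this is a standalone sketch, not HBase's actual ExploringCompactionPolicy code; file sizes below are only loosely modeled on the ~12 KB B-family flush files in the log):

// Illustrative sketch only: a candidate window is "in ratio" if every file is
// no larger than ratio * (sum of the other files in the window).
import java.util.List;

public class RatioCheckSketch {
    static boolean inRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > (total - size) * ratio) {
                return false;  // one file dominates the window; skip this selection
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Four similar-sized flush outputs, like the B-family files compacted above.
        List<Long> window = List.of(12_090L, 12_001L, 12_001L, 12_015L);
        System.out.println(inRatio(window, 1.2));  // prints true: similar sizes pass
    }
}

Because all four flush files are of comparable size, a window containing all of them passes this kind of check, which is consistent with the log selecting "4 files of size 48107" in one go.
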
2024-11-07T14:20:01,492 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. files: [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/62d3ecef3cf14f4a933d0ab3ef27a972, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/dec78d320ff24ac0b89321e93e85b404, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/deb77834d8a447108f547afcbfe092d8, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/932a8cfbf8db42f887017f3ef8f000aa] 2024-11-07T14:20:01,492 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 070a4a959a784efab399cc208e554226, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1730989195499 2024-11-07T14:20:01,493 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 4158398b681749e397a178b7ae97b3f3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1730989195627 2024-11-07T14:20:01,493 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 62d3ecef3cf14f4a933d0ab3ef27a972, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1730989195499 2024-11-07T14:20:01,493 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 88e8b06ab4fe4a33b66ecb17bc15fe82, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1730989197534 2024-11-07T14:20:01,493 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting dec78d320ff24ac0b89321e93e85b404, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1730989195627 2024-11-07T14:20:01,493 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 65b911bdaffe41919bcf2cc093f8c8d7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1730989198687 2024-11-07T14:20:01,493 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting deb77834d8a447108f547afcbfe092d8, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1730989197534 2024-11-07T14:20:01,494 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 932a8cfbf8db42f887017f3ef8f000aa, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1730989198687 2024-11-07T14:20:01,500 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:01,501 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
13fd071fb15e0e486dd456286374cf34#B#compaction#470 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:20:01,501 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/70eea9d79eb14e62b6dc119f50bd4218 is 50, key is test_row_0/B:col10/1730989198687/Put/seqid=0 2024-11-07T14:20:01,501 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107b14bfc7e94fd464baf4a9a1ed12580d3_13fd071fb15e0e486dd456286374cf34 store=[table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:01,503 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107b14bfc7e94fd464baf4a9a1ed12580d3_13fd071fb15e0e486dd456286374cf34, store=[table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:01,503 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107b14bfc7e94fd464baf4a9a1ed12580d3_13fd071fb15e0e486dd456286374cf34 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:01,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742380_1556 (size=12241) 2024-11-07T14:20:01,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742381_1557 (size=4469) 2024-11-07T14:20:01,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-07T14:20:01,566 INFO [Thread-2359 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 155 completed 2024-11-07T14:20:01,567 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:20:01,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees 2024-11-07T14:20:01,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-07T14:20:01,570 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:20:01,570 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 
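
[Aside, not part of the log] The surrounding entries ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees", FlushTableProcedure pid=157, and the earlier "Operation: FLUSH ... procId: 155 completed") record a client-initiated table flush. A minimal sketch of the client call that produces this kind of procedure is shown below; connection details are placeholders and this is not taken from the test code itself:

// Illustrative sketch: issue a table flush via the HBase Admin API. The master
// runs it as a FlushTableProcedure with one FlushRegionProcedure per region,
// which is what the pid/ppid entries above reflect.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
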
2024-11-07T14:20:01,570 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:20:01,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-07T14:20:01,721 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:20:01,722 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-07T14:20:01,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:01,722 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2837): Flushing 13fd071fb15e0e486dd456286374cf34 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-07T14:20:01,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=A 2024-11-07T14:20:01,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:01,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=B 2024-11-07T14:20:01,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:01,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=C 2024-11-07T14:20:01,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:01,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107fb84cdb60a314b92817f1a23b8f44337_13fd071fb15e0e486dd456286374cf34 is 50, key is test_row_0/A:col10/1730989199824/Put/seqid=0 2024-11-07T14:20:01,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742382_1558 (size=12154) 2024-11-07T14:20:01,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-07T14:20:01,910 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/70eea9d79eb14e62b6dc119f50bd4218 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/70eea9d79eb14e62b6dc119f50bd4218 2024-11-07T14:20:01,913 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 13fd071fb15e0e486dd456286374cf34#A#compaction#471 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:20:01,914 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 13fd071fb15e0e486dd456286374cf34/B of 13fd071fb15e0e486dd456286374cf34 into 70eea9d79eb14e62b6dc119f50bd4218(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:20:01,914 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:20:01,914 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34., storeName=13fd071fb15e0e486dd456286374cf34/B, priority=12, startTime=1730989201491; duration=0sec 2024-11-07T14:20:01,914 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:20:01,914 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 13fd071fb15e0e486dd456286374cf34:B 2024-11-07T14:20:01,914 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:20:01,914 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/bc4916e685174350974c61b2f02ab84b is 175, key is test_row_0/A:col10/1730989198687/Put/seqid=0 2024-11-07T14:20:01,915 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:20:01,916 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 13fd071fb15e0e486dd456286374cf34/C is initiating minor compaction (all files) 2024-11-07T14:20:01,916 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 13fd071fb15e0e486dd456286374cf34/C in TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 
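
[Aside, not part of the log] The repeated RegionTooBusyException warnings in this stretch ("Over memstore limit=512.0 K") are the region server blocking writes because the region's memstore has exceeded its blocking threshold, i.e. the configured flush size times the block multiplier. The 512 K figure here is presumably an artifact of the test using deliberately small memstore settings; the sketch below only illustrates the two standard knobs involved, with placeholder values rather than the test's actual configuration:

// Illustrative sketch: the product of these two settings is the per-region
// memstore size above which puts are rejected with RegionTooBusyException.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Flush a store once its memstore reaches this many bytes (placeholder value).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Block new writes once the region memstore exceeds flush.size * multiplier.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        System.out.println("Writes blocked above ~" + blockingLimit + " bytes per region");
    }
}

Clients normally see these rejections as retryable failures, which is why the same Mutate calls reappear above with new callIds and later deadlines until the flush completes and the memstore drains.
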
2024-11-07T14:20:01,916 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/12be5696c0b94e888774abfacdf907fb, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/1a3ee5a1da224231a3345d5ce7497c21, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/0e3ec58fe31545fa81d82e8a84028e96, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/0b41cfeb1a3943789b766950c884fb07] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp, totalSize=47.0 K 2024-11-07T14:20:01,916 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 12be5696c0b94e888774abfacdf907fb, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1730989195499 2024-11-07T14:20:01,917 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 1a3ee5a1da224231a3345d5ce7497c21, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1730989195627 2024-11-07T14:20:01,917 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e3ec58fe31545fa81d82e8a84028e96, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1730989197534 2024-11-07T14:20:01,917 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b41cfeb1a3943789b766950c884fb07, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1730989198687 2024-11-07T14:20:01,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742383_1559 (size=31195) 2024-11-07T14:20:01,925 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 13fd071fb15e0e486dd456286374cf34#C#compaction#473 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:20:01,926 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/6af3b31e741e41a2903202fdf77efc13 is 50, key is test_row_0/C:col10/1730989198687/Put/seqid=0 2024-11-07T14:20:01,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742384_1560 (size=12241) 2024-11-07T14:20:01,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:01,947 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 
as already flushing 2024-11-07T14:20:01,980 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:01,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989261977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:01,982 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:01,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989261980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:02,084 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:02,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989262081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:02,084 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:02,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989262083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:02,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:02,144 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107fb84cdb60a314b92817f1a23b8f44337_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107fb84cdb60a314b92817f1a23b8f44337_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:02,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/4e433c23ebb74097b7a4837f848dc3c7, store: [table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:02,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/4e433c23ebb74097b7a4837f848dc3c7 is 175, key is test_row_0/A:col10/1730989199824/Put/seqid=0 2024-11-07T14:20:02,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742385_1561 (size=30955) 2024-11-07T14:20:02,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-07T14:20:02,287 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:02,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989262286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:02,288 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:02,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989262286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:02,322 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/bc4916e685174350974c61b2f02ab84b as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/bc4916e685174350974c61b2f02ab84b 2024-11-07T14:20:02,329 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 13fd071fb15e0e486dd456286374cf34/A of 13fd071fb15e0e486dd456286374cf34 into bc4916e685174350974c61b2f02ab84b(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:20:02,329 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:20:02,329 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34., storeName=13fd071fb15e0e486dd456286374cf34/A, priority=12, startTime=1730989201491; duration=0sec 2024-11-07T14:20:02,329 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:20:02,329 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 13fd071fb15e0e486dd456286374cf34:A 2024-11-07T14:20:02,333 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/6af3b31e741e41a2903202fdf77efc13 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/6af3b31e741e41a2903202fdf77efc13 2024-11-07T14:20:02,337 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 13fd071fb15e0e486dd456286374cf34/C of 13fd071fb15e0e486dd456286374cf34 into 6af3b31e741e41a2903202fdf77efc13(size=12.0 K), total size for store is 12.0 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:20:02,337 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:20:02,337 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34., storeName=13fd071fb15e0e486dd456286374cf34/C, priority=12, startTime=1730989201491; duration=0sec 2024-11-07T14:20:02,337 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:20:02,338 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 13fd071fb15e0e486dd456286374cf34:C 2024-11-07T14:20:02,550 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=129, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/4e433c23ebb74097b7a4837f848dc3c7 2024-11-07T14:20:02,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/60b37977c5e14301a88555bb53024306 is 50, key is test_row_0/B:col10/1730989199824/Put/seqid=0 2024-11-07T14:20:02,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742386_1562 (size=12001) 2024-11-07T14:20:02,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:02,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989262589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:02,592 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:02,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989262591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:02,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-07T14:20:02,960 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/60b37977c5e14301a88555bb53024306 2024-11-07T14:20:02,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/a4cd2a516ff4415282d2377236441dbc is 50, key is test_row_0/C:col10/1730989199824/Put/seqid=0 2024-11-07T14:20:02,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742387_1563 (size=12001) 2024-11-07T14:20:02,971 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/a4cd2a516ff4415282d2377236441dbc 2024-11-07T14:20:02,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/4e433c23ebb74097b7a4837f848dc3c7 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/4e433c23ebb74097b7a4837f848dc3c7 2024-11-07T14:20:02,982 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/4e433c23ebb74097b7a4837f848dc3c7, entries=150, sequenceid=129, filesize=30.2 K 2024-11-07T14:20:02,983 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/60b37977c5e14301a88555bb53024306 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/60b37977c5e14301a88555bb53024306 2024-11-07T14:20:02,986 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/60b37977c5e14301a88555bb53024306, entries=150, sequenceid=129, filesize=11.7 K 2024-11-07T14:20:02,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/a4cd2a516ff4415282d2377236441dbc as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/a4cd2a516ff4415282d2377236441dbc 2024-11-07T14:20:02,990 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/a4cd2a516ff4415282d2377236441dbc, entries=150, sequenceid=129, filesize=11.7 K 2024-11-07T14:20:02,991 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 13fd071fb15e0e486dd456286374cf34 in 1269ms, sequenceid=129, compaction requested=false 2024-11-07T14:20:02,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2538): Flush status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:20:02,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 
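[editorial aside, not part of the captured log] The repeated RegionTooBusyException entries above show puts being rejected while the memstore for region 13fd071fb15e0e486dd456286374cf34 sits over its 512.0 K limit, until the flush just logged completes. The sketch below is a minimal, illustrative HBase Java client loop against the TestAcidGuarantees table showing how such writes surface on the caller side; the class name, config values, and column value are assumptions for illustration and are not taken from this test run.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutExample {           // hypothetical class, for illustration only
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Standard HBase client retry knobs; the client-side RpcRetryingCaller already
    // retries RegionTooBusyException internally (cf. "tries=7, retries=16" later in
    // this log). Values here are arbitrary examples.
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        // May block and retry internally while the server-side flush drains the memstore.
        table.put(put);
      } catch (IOException e) {
        // If all retries are exhausted, the failure reaches the caller; in runs like the
        // one logged here the root cause is RegionTooBusyException ("Over memstore limit").
        System.err.println("Put failed after retries: " + e.getMessage());
      }
    }
  }
}

In this log the writers (AcidGuaranteesTestTool$AtomicityWriter) rely on exactly that built-in retry path rather than hand-written backoff, which is why the rejections appear only as WARN/DEBUG entries until a retry window runs out.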
2024-11-07T14:20:02,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=158 2024-11-07T14:20:02,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=158 2024-11-07T14:20:03,001 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=157 2024-11-07T14:20:03,001 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4220 sec 2024-11-07T14:20:03,002 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees in 1.4340 sec 2024-11-07T14:20:03,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:03,098 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 13fd071fb15e0e486dd456286374cf34 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-07T14:20:03,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=A 2024-11-07T14:20:03,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:03,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=B 2024-11-07T14:20:03,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:03,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=C 2024-11-07T14:20:03,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:03,104 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411076496d808808b4db79fdb54c7471641e3_13fd071fb15e0e486dd456286374cf34 is 50, key is test_row_0/A:col10/1730989201976/Put/seqid=0 2024-11-07T14:20:03,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742388_1564 (size=14794) 2024-11-07T14:20:03,108 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:03,111 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:03,111 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:03,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989263109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:03,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989263109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:03,111 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411076496d808808b4db79fdb54c7471641e3_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411076496d808808b4db79fdb54c7471641e3_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:03,112 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/5e9563736f5847a99f2b2a1aa995c3f7, store: [table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:03,113 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/5e9563736f5847a99f2b2a1aa995c3f7 is 175, key is test_row_0/A:col10/1730989201976/Put/seqid=0 2024-11-07T14:20:03,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742389_1565 (size=39749) 2024-11-07T14:20:03,121 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=157, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/5e9563736f5847a99f2b2a1aa995c3f7 2024-11-07T14:20:03,128 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/f68e9f0b3afe4b209acca31f42d801d9 is 50, key is 
test_row_0/B:col10/1730989201976/Put/seqid=0 2024-11-07T14:20:03,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742390_1566 (size=12151) 2024-11-07T14:20:03,214 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:03,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989263212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:03,214 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:03,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989263212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:03,416 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:03,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989263415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:03,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:03,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989263416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:03,535 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/f68e9f0b3afe4b209acca31f42d801d9 2024-11-07T14:20:03,542 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/90e88d4e62364005816ddeea31c6a5c1 is 50, key is test_row_0/C:col10/1730989201976/Put/seqid=0 2024-11-07T14:20:03,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742391_1567 (size=12151) 2024-11-07T14:20:03,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-07T14:20:03,673 INFO [Thread-2359 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 157 completed 2024-11-07T14:20:03,675 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:20:03,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees 2024-11-07T14:20:03,676 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:20:03,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-07T14:20:03,677 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=159, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:20:03,677 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:20:03,719 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:03,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989263718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:03,720 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:03,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989263719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:03,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-07T14:20:03,781 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:03,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34814 deadline: 1730989263780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:03,782 DEBUG [Thread-2353 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8153 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34., hostname=69430dbfd73f,45917,1730989044081, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T14:20:03,791 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:03,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1730989263790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:03,792 DEBUG [Thread-2349 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8162 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34., hostname=69430dbfd73f,45917,1730989044081, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T14:20:03,796 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:03,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34834 deadline: 1730989263795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:03,796 DEBUG [Thread-2357 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8168 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34., hostname=69430dbfd73f,45917,1730989044081, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T14:20:03,828 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:20:03,829 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-07T14:20:03,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:03,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:20:03,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:03,829 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:03,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:03,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:20:03,960 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/90e88d4e62364005816ddeea31c6a5c1 2024-11-07T14:20:03,965 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/5e9563736f5847a99f2b2a1aa995c3f7 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/5e9563736f5847a99f2b2a1aa995c3f7 2024-11-07T14:20:03,968 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/5e9563736f5847a99f2b2a1aa995c3f7, entries=200, sequenceid=157, filesize=38.8 K 2024-11-07T14:20:03,969 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/f68e9f0b3afe4b209acca31f42d801d9 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/f68e9f0b3afe4b209acca31f42d801d9 2024-11-07T14:20:03,973 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/f68e9f0b3afe4b209acca31f42d801d9, entries=150, sequenceid=157, filesize=11.9 K 2024-11-07T14:20:03,974 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/90e88d4e62364005816ddeea31c6a5c1 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/90e88d4e62364005816ddeea31c6a5c1 2024-11-07T14:20:03,977 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/90e88d4e62364005816ddeea31c6a5c1, entries=150, sequenceid=157, filesize=11.9 K 2024-11-07T14:20:03,978 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 13fd071fb15e0e486dd456286374cf34 in 881ms, sequenceid=157, compaction requested=true 2024-11-07T14:20:03,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-07T14:20:03,978 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:20:03,978 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 13fd071fb15e0e486dd456286374cf34:A, priority=-2147483648, current under compaction store 
size is 1 2024-11-07T14:20:03,978 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:20:03,978 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:20:03,978 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 13fd071fb15e0e486dd456286374cf34:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:20:03,978 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:20:03,978 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:20:03,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 13fd071fb15e0e486dd456286374cf34:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:20:03,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:20:03,979 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:20:03,979 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 13fd071fb15e0e486dd456286374cf34/B is initiating minor compaction (all files) 2024-11-07T14:20:03,979 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101899 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:20:03,981 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:20:03,988 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-07T14:20:03,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:03,988 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): 13fd071fb15e0e486dd456286374cf34/A is initiating minor compaction (all files) 2024-11-07T14:20:03,988 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2837): Flushing 13fd071fb15e0e486dd456286374cf34 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-07T14:20:03,988 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 13fd071fb15e0e486dd456286374cf34/A in TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 
2024-11-07T14:20:03,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=A 2024-11-07T14:20:03,988 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/bc4916e685174350974c61b2f02ab84b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/4e433c23ebb74097b7a4837f848dc3c7, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/5e9563736f5847a99f2b2a1aa995c3f7] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp, totalSize=99.5 K 2024-11-07T14:20:03,988 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:03,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:03,988 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 
files: [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/bc4916e685174350974c61b2f02ab84b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/4e433c23ebb74097b7a4837f848dc3c7, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/5e9563736f5847a99f2b2a1aa995c3f7] 2024-11-07T14:20:03,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=B 2024-11-07T14:20:03,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:03,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=C 2024-11-07T14:20:03,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:03,989 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 13fd071fb15e0e486dd456286374cf34/B in TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:03,989 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/70eea9d79eb14e62b6dc119f50bd4218, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/60b37977c5e14301a88555bb53024306, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/f68e9f0b3afe4b209acca31f42d801d9] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp, totalSize=35.5 K 2024-11-07T14:20:03,989 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting bc4916e685174350974c61b2f02ab84b, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1730989198687 2024-11-07T14:20:03,989 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4e433c23ebb74097b7a4837f848dc3c7, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1730989199816 2024-11-07T14:20:03,989 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 70eea9d79eb14e62b6dc119f50bd4218, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1730989198687 2024-11-07T14:20:03,990 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5e9563736f5847a99f2b2a1aa995c3f7, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=157, 
earliestPutTs=1730989201975 2024-11-07T14:20:03,990 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 60b37977c5e14301a88555bb53024306, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1730989199816 2024-11-07T14:20:03,990 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting f68e9f0b3afe4b209acca31f42d801d9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1730989201976 2024-11-07T14:20:03,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411074ab222462b374c1faed5a3c355747ec0_13fd071fb15e0e486dd456286374cf34 is 50, key is test_row_0/A:col10/1730989203105/Put/seqid=0 2024-11-07T14:20:03,997 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:03,999 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 13fd071fb15e0e486dd456286374cf34#B#compaction#481 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:20:04,000 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/cabc3b0067da4f3cb67d4950a2ea8bc7 is 50, key is test_row_0/B:col10/1730989201976/Put/seqid=0 2024-11-07T14:20:04,000 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107e781993427674db3ab6ce2772a7426e3_13fd071fb15e0e486dd456286374cf34 store=[table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:04,002 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107e781993427674db3ab6ce2772a7426e3_13fd071fb15e0e486dd456286374cf34, store=[table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:04,003 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107e781993427674db3ab6ce2772a7426e3_13fd071fb15e0e486dd456286374cf34 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:04,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742392_1568 (size=12304) 2024-11-07T14:20:04,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:04,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742393_1569 (size=12493) 2024-11-07T14:20:04,010 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411074ab222462b374c1faed5a3c355747ec0_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411074ab222462b374c1faed5a3c355747ec0_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:04,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/25dbd238b1144abab70d5d02266e3aee, store: [table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:04,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/25dbd238b1144abab70d5d02266e3aee is 175, key is test_row_0/A:col10/1730989203105/Put/seqid=0 2024-11-07T14:20:04,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742394_1570 (size=4469) 2024-11-07T14:20:04,015 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/cabc3b0067da4f3cb67d4950a2ea8bc7 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/cabc3b0067da4f3cb67d4950a2ea8bc7 2024-11-07T14:20:04,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742395_1571 (size=31105) 2024-11-07T14:20:04,020 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 13fd071fb15e0e486dd456286374cf34/B of 13fd071fb15e0e486dd456286374cf34 into cabc3b0067da4f3cb67d4950a2ea8bc7(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:20:04,021 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:20:04,021 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34., storeName=13fd071fb15e0e486dd456286374cf34/B, priority=13, startTime=1730989203978; duration=0sec 2024-11-07T14:20:04,021 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:20:04,021 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 13fd071fb15e0e486dd456286374cf34:B 2024-11-07T14:20:04,021 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:20:04,022 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:20:04,022 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 13fd071fb15e0e486dd456286374cf34/C is initiating minor compaction (all files) 2024-11-07T14:20:04,022 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 13fd071fb15e0e486dd456286374cf34/C in TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:04,022 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/6af3b31e741e41a2903202fdf77efc13, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/a4cd2a516ff4415282d2377236441dbc, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/90e88d4e62364005816ddeea31c6a5c1] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp, totalSize=35.5 K 2024-11-07T14:20:04,022 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 6af3b31e741e41a2903202fdf77efc13, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1730989198687 2024-11-07T14:20:04,023 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting a4cd2a516ff4415282d2377236441dbc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1730989199816 2024-11-07T14:20:04,023 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 90e88d4e62364005816ddeea31c6a5c1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1730989201976 2024-11-07T14:20:04,030 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
13fd071fb15e0e486dd456286374cf34#C#compaction#482 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:20:04,030 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/6446094cb8964b0380518f3f26a89526 is 50, key is test_row_0/C:col10/1730989201976/Put/seqid=0 2024-11-07T14:20:04,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742396_1572 (size=12493) 2024-11-07T14:20:04,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:04,224 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:20:04,258 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:04,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989264256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:04,259 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:04,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989264257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:04,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-07T14:20:04,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:04,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989264360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:04,361 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:04,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989264361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:04,415 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 13fd071fb15e0e486dd456286374cf34#A#compaction#480 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:20:04,416 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/37a32ce505564553be528c624ced15a8 is 175, key is test_row_0/A:col10/1730989201976/Put/seqid=0 2024-11-07T14:20:04,417 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=168, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/25dbd238b1144abab70d5d02266e3aee 2024-11-07T14:20:04,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742397_1573 (size=31447) 2024-11-07T14:20:04,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/f8cb80313f894dbdbb9cdfaed2ac9444 is 50, key is test_row_0/B:col10/1730989203105/Put/seqid=0 2024-11-07T14:20:04,431 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/37a32ce505564553be528c624ced15a8 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/37a32ce505564553be528c624ced15a8 2024-11-07T14:20:04,438 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 13fd071fb15e0e486dd456286374cf34/A of 13fd071fb15e0e486dd456286374cf34 into 37a32ce505564553be528c624ced15a8(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:20:04,438 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:20:04,438 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34., storeName=13fd071fb15e0e486dd456286374cf34/A, priority=13, startTime=1730989203978; duration=0sec 2024-11-07T14:20:04,438 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:20:04,438 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 13fd071fb15e0e486dd456286374cf34:A 2024-11-07T14:20:04,439 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/6446094cb8964b0380518f3f26a89526 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/6446094cb8964b0380518f3f26a89526 2024-11-07T14:20:04,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742398_1574 (size=12151) 2024-11-07T14:20:04,445 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 13fd071fb15e0e486dd456286374cf34/C of 13fd071fb15e0e486dd456286374cf34 into 6446094cb8964b0380518f3f26a89526(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:20:04,445 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:20:04,445 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34., storeName=13fd071fb15e0e486dd456286374cf34/C, priority=13, startTime=1730989203978; duration=0sec 2024-11-07T14:20:04,445 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:20:04,445 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 13fd071fb15e0e486dd456286374cf34:C 2024-11-07T14:20:04,562 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:04,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989264562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:04,563 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:04,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989264562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:04,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-07T14:20:04,845 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/f8cb80313f894dbdbb9cdfaed2ac9444 2024-11-07T14:20:04,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/165a4435b9e04844a6c73b448c1bd30a is 50, key is test_row_0/C:col10/1730989203105/Put/seqid=0 2024-11-07T14:20:04,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742399_1575 (size=12151) 2024-11-07T14:20:04,865 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:04,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989264864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:04,866 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:04,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989264865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:05,255 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/165a4435b9e04844a6c73b448c1bd30a 2024-11-07T14:20:05,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/25dbd238b1144abab70d5d02266e3aee as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/25dbd238b1144abab70d5d02266e3aee 2024-11-07T14:20:05,263 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/25dbd238b1144abab70d5d02266e3aee, entries=150, sequenceid=168, filesize=30.4 K 2024-11-07T14:20:05,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/f8cb80313f894dbdbb9cdfaed2ac9444 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/f8cb80313f894dbdbb9cdfaed2ac9444 2024-11-07T14:20:05,268 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/f8cb80313f894dbdbb9cdfaed2ac9444, entries=150, sequenceid=168, filesize=11.9 K 2024-11-07T14:20:05,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/165a4435b9e04844a6c73b448c1bd30a as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/165a4435b9e04844a6c73b448c1bd30a 2024-11-07T14:20:05,272 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/165a4435b9e04844a6c73b448c1bd30a, entries=150, sequenceid=168, filesize=11.9 K 2024-11-07T14:20:05,273 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 13fd071fb15e0e486dd456286374cf34 in 1285ms, sequenceid=168, compaction requested=false 2024-11-07T14:20:05,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2538): Flush status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:20:05,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:05,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=160 2024-11-07T14:20:05,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=160 2024-11-07T14:20:05,277 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-11-07T14:20:05,277 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5970 sec 2024-11-07T14:20:05,278 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees in 1.6020 sec 2024-11-07T14:20:05,373 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 13fd071fb15e0e486dd456286374cf34 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-07T14:20:05,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:05,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=A 2024-11-07T14:20:05,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:05,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=B 2024-11-07T14:20:05,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:05,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
13fd071fb15e0e486dd456286374cf34, store=C 2024-11-07T14:20:05,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:05,387 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107ce4c6e4e4be74c3bb171c3a6ed9071db_13fd071fb15e0e486dd456286374cf34 is 50, key is test_row_0/A:col10/1730989204255/Put/seqid=0 2024-11-07T14:20:05,389 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:05,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989265388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:05,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:05,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989265389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:05,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742400_1576 (size=14794) 2024-11-07T14:20:05,492 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:05,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989265491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:05,494 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:05,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989265492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:05,695 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:05,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989265694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:05,698 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:05,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989265696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:05,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-07T14:20:05,780 INFO [Thread-2359 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 159 completed 2024-11-07T14:20:05,782 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:20:05,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees 2024-11-07T14:20:05,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-07T14:20:05,783 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:20:05,783 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:20:05,784 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:20:05,822 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:05,826 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107ce4c6e4e4be74c3bb171c3a6ed9071db_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107ce4c6e4e4be74c3bb171c3a6ed9071db_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:05,827 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/6b88ae5e2bb14e929c9bf71f780d0643, store: [table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:05,828 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/6b88ae5e2bb14e929c9bf71f780d0643 is 175, key is test_row_0/A:col10/1730989204255/Put/seqid=0 2024-11-07T14:20:05,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742401_1577 (size=39749) 2024-11-07T14:20:05,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-07T14:20:05,935 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:20:05,935 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-07T14:20:05,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:05,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:20:05,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:05,936 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:05,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:05,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:05,999 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:05,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989265998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:06,002 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:06,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989266000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:06,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-07T14:20:06,088 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:20:06,088 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-07T14:20:06,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:06,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:20:06,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:06,089 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:20:06,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:06,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:06,232 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=198, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/6b88ae5e2bb14e929c9bf71f780d0643 2024-11-07T14:20:06,239 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/84c1d039de014f6a846ebbea0121cde4 is 50, key is test_row_0/B:col10/1730989204255/Put/seqid=0 2024-11-07T14:20:06,241 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:20:06,241 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-07T14:20:06,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:06,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:20:06,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:06,242 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:20:06,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:06,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:06,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742402_1578 (size=12151) 2024-11-07T14:20:06,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-07T14:20:06,394 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:20:06,394 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-07T14:20:06,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:06,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:20:06,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:06,394 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:06,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:06,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:06,504 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:06,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989266502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:06,508 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:06,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989266507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:06,546 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:20:06,547 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-07T14:20:06,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:06,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:20:06,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:06,547 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:20:06,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:06,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:06,645 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/84c1d039de014f6a846ebbea0121cde4 2024-11-07T14:20:06,653 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/581e67a0d5e64f7aa840b4e19c9e4eca is 50, key is test_row_0/C:col10/1730989204255/Put/seqid=0 2024-11-07T14:20:06,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742403_1579 (size=12151) 2024-11-07T14:20:06,699 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:20:06,699 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-07T14:20:06,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:06,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:20:06,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:06,700 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:20:06,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:06,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:06,851 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:20:06,852 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-07T14:20:06,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:06,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:20:06,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:06,852 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:06,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:06,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:20:06,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-07T14:20:07,004 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:20:07,005 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-07T14:20:07,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:07,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:20:07,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:07,005 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:07,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:07,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:20:07,056 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/581e67a0d5e64f7aa840b4e19c9e4eca 2024-11-07T14:20:07,060 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/6b88ae5e2bb14e929c9bf71f780d0643 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/6b88ae5e2bb14e929c9bf71f780d0643 2024-11-07T14:20:07,063 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/6b88ae5e2bb14e929c9bf71f780d0643, entries=200, sequenceid=198, filesize=38.8 K 2024-11-07T14:20:07,064 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/84c1d039de014f6a846ebbea0121cde4 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/84c1d039de014f6a846ebbea0121cde4 2024-11-07T14:20:07,067 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/84c1d039de014f6a846ebbea0121cde4, entries=150, sequenceid=198, filesize=11.9 K 2024-11-07T14:20:07,068 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/581e67a0d5e64f7aa840b4e19c9e4eca as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/581e67a0d5e64f7aa840b4e19c9e4eca 2024-11-07T14:20:07,071 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/581e67a0d5e64f7aa840b4e19c9e4eca, entries=150, sequenceid=198, filesize=11.9 K 2024-11-07T14:20:07,071 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 13fd071fb15e0e486dd456286374cf34 in 1699ms, sequenceid=198, compaction requested=true 2024-11-07T14:20:07,071 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:20:07,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 13fd071fb15e0e486dd456286374cf34:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:20:07,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:20:07,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 13fd071fb15e0e486dd456286374cf34:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:20:07,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:20:07,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 13fd071fb15e0e486dd456286374cf34:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:20:07,071 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:20:07,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:20:07,072 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:20:07,076 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:20:07,076 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102301 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:20:07,076 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): 13fd071fb15e0e486dd456286374cf34/A is initiating minor compaction (all files) 2024-11-07T14:20:07,076 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 13fd071fb15e0e486dd456286374cf34/B is initiating minor compaction (all files) 2024-11-07T14:20:07,076 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 13fd071fb15e0e486dd456286374cf34/B in TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:07,076 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 13fd071fb15e0e486dd456286374cf34/A in TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 
2024-11-07T14:20:07,076 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/cabc3b0067da4f3cb67d4950a2ea8bc7, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/f8cb80313f894dbdbb9cdfaed2ac9444, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/84c1d039de014f6a846ebbea0121cde4] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp, totalSize=35.9 K 2024-11-07T14:20:07,076 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/37a32ce505564553be528c624ced15a8, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/25dbd238b1144abab70d5d02266e3aee, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/6b88ae5e2bb14e929c9bf71f780d0643] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp, totalSize=99.9 K 2024-11-07T14:20:07,076 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:07,076 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 
files: [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/37a32ce505564553be528c624ced15a8, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/25dbd238b1144abab70d5d02266e3aee, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/6b88ae5e2bb14e929c9bf71f780d0643] 2024-11-07T14:20:07,077 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting cabc3b0067da4f3cb67d4950a2ea8bc7, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1730989201976 2024-11-07T14:20:07,077 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 37a32ce505564553be528c624ced15a8, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1730989201976 2024-11-07T14:20:07,077 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting f8cb80313f894dbdbb9cdfaed2ac9444, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1730989203105 2024-11-07T14:20:07,078 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 25dbd238b1144abab70d5d02266e3aee, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1730989203105 2024-11-07T14:20:07,078 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 84c1d039de014f6a846ebbea0121cde4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1730989204252 2024-11-07T14:20:07,078 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6b88ae5e2bb14e929c9bf71f780d0643, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1730989204252 2024-11-07T14:20:07,084 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:07,085 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 13fd071fb15e0e486dd456286374cf34#B#compaction#488 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:20:07,086 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/5f8319d706c84e7094c18ef674a14ad6 is 50, key is test_row_0/B:col10/1730989204255/Put/seqid=0 2024-11-07T14:20:07,087 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107255686786ef94406b844aacd51ee4dc8_13fd071fb15e0e486dd456286374cf34 store=[table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:07,089 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107255686786ef94406b844aacd51ee4dc8_13fd071fb15e0e486dd456286374cf34, store=[table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:07,089 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107255686786ef94406b844aacd51ee4dc8_13fd071fb15e0e486dd456286374cf34 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:07,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742404_1580 (size=12595) 2024-11-07T14:20:07,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742405_1581 (size=4469) 2024-11-07T14:20:07,103 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 13fd071fb15e0e486dd456286374cf34#A#compaction#489 average throughput is 1.29 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:20:07,104 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/9752e718278a4ff287f1be9abb05ebbf is 175, key is test_row_0/A:col10/1730989204255/Put/seqid=0 2024-11-07T14:20:07,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742406_1582 (size=31549) 2024-11-07T14:20:07,157 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:20:07,157 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-07T14:20:07,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 
2024-11-07T14:20:07,158 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2837): Flushing 13fd071fb15e0e486dd456286374cf34 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-07T14:20:07,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=A 2024-11-07T14:20:07,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:07,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=B 2024-11-07T14:20:07,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:07,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=C 2024-11-07T14:20:07,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:07,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411076f35f909a0ce47058dc616b1694cb924_13fd071fb15e0e486dd456286374cf34 is 50, key is test_row_0/A:col10/1730989205384/Put/seqid=0 2024-11-07T14:20:07,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742407_1583 (size=12304) 2024-11-07T14:20:07,498 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/5f8319d706c84e7094c18ef674a14ad6 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/5f8319d706c84e7094c18ef674a14ad6 2024-11-07T14:20:07,502 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 13fd071fb15e0e486dd456286374cf34/B of 13fd071fb15e0e486dd456286374cf34 into 5f8319d706c84e7094c18ef674a14ad6(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:20:07,502 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:20:07,502 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34., storeName=13fd071fb15e0e486dd456286374cf34/B, priority=13, startTime=1730989207071; duration=0sec 2024-11-07T14:20:07,502 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:20:07,502 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 13fd071fb15e0e486dd456286374cf34:B 2024-11-07T14:20:07,502 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:20:07,504 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:20:07,504 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 13fd071fb15e0e486dd456286374cf34/C is initiating minor compaction (all files) 2024-11-07T14:20:07,504 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 13fd071fb15e0e486dd456286374cf34/C in TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:07,504 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/6446094cb8964b0380518f3f26a89526, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/165a4435b9e04844a6c73b448c1bd30a, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/581e67a0d5e64f7aa840b4e19c9e4eca] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp, totalSize=35.9 K 2024-11-07T14:20:07,504 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 6446094cb8964b0380518f3f26a89526, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1730989201976 2024-11-07T14:20:07,504 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 165a4435b9e04844a6c73b448c1bd30a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1730989203105 2024-11-07T14:20:07,504 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 581e67a0d5e64f7aa840b4e19c9e4eca, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1730989204252 2024-11-07T14:20:07,511 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing 
TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:20:07,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:07,513 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/9752e718278a4ff287f1be9abb05ebbf as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/9752e718278a4ff287f1be9abb05ebbf 2024-11-07T14:20:07,515 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 13fd071fb15e0e486dd456286374cf34#C#compaction#491 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:20:07,516 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/104df67ddb4f4277948cc25c93311812 is 50, key is test_row_0/C:col10/1730989204255/Put/seqid=0 2024-11-07T14:20:07,518 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 13fd071fb15e0e486dd456286374cf34/A of 13fd071fb15e0e486dd456286374cf34 into 9752e718278a4ff287f1be9abb05ebbf(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:20:07,518 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:20:07,518 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34., storeName=13fd071fb15e0e486dd456286374cf34/A, priority=13, startTime=1730989207071; duration=0sec 2024-11-07T14:20:07,519 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:20:07,519 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 13fd071fb15e0e486dd456286374cf34:A 2024-11-07T14:20:07,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742408_1584 (size=12595) 2024-11-07T14:20:07,551 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:07,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989267548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:07,551 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:07,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989267550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:07,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:07,572 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411076f35f909a0ce47058dc616b1694cb924_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411076f35f909a0ce47058dc616b1694cb924_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:07,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/f35dd5d32cf94dcca1a8b89626004213, store: [table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:07,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/f35dd5d32cf94dcca1a8b89626004213 is 175, key is test_row_0/A:col10/1730989205384/Put/seqid=0 2024-11-07T14:20:07,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742409_1585 (size=31105) 2024-11-07T14:20:07,582 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=208, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/f35dd5d32cf94dcca1a8b89626004213 2024-11-07T14:20:07,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/c6d155e0b7924366bda0560609f6c3c7 is 50, key is test_row_0/B:col10/1730989205384/Put/seqid=0 2024-11-07T14:20:07,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742410_1586 (size=12151) 2024-11-07T14:20:07,609 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/c6d155e0b7924366bda0560609f6c3c7 2024-11-07T14:20:07,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/3908184bec384ce8aa40b88a5cb8c3c7 is 50, key is test_row_0/C:col10/1730989205384/Put/seqid=0 2024-11-07T14:20:07,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742411_1587 (size=12151) 2024-11-07T14:20:07,624 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/3908184bec384ce8aa40b88a5cb8c3c7 2024-11-07T14:20:07,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/f35dd5d32cf94dcca1a8b89626004213 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/f35dd5d32cf94dcca1a8b89626004213 2024-11-07T14:20:07,633 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/f35dd5d32cf94dcca1a8b89626004213, entries=150, sequenceid=208, filesize=30.4 K 2024-11-07T14:20:07,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/c6d155e0b7924366bda0560609f6c3c7 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/c6d155e0b7924366bda0560609f6c3c7 2024-11-07T14:20:07,638 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/c6d155e0b7924366bda0560609f6c3c7, entries=150, sequenceid=208, filesize=11.9 K 2024-11-07T14:20:07,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/3908184bec384ce8aa40b88a5cb8c3c7 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/3908184bec384ce8aa40b88a5cb8c3c7 2024-11-07T14:20:07,642 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/3908184bec384ce8aa40b88a5cb8c3c7, entries=150, sequenceid=208, filesize=11.9 K 2024-11-07T14:20:07,643 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 13fd071fb15e0e486dd456286374cf34 in 485ms, sequenceid=208, compaction requested=false 2024-11-07T14:20:07,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2538): Flush status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:20:07,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 
2024-11-07T14:20:07,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=162 2024-11-07T14:20:07,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=162 2024-11-07T14:20:07,645 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161 2024-11-07T14:20:07,645 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8610 sec 2024-11-07T14:20:07,646 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees in 1.8640 sec 2024-11-07T14:20:07,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:07,653 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 13fd071fb15e0e486dd456286374cf34 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-07T14:20:07,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=A 2024-11-07T14:20:07,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:07,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=B 2024-11-07T14:20:07,654 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:07,654 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=C 2024-11-07T14:20:07,654 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:07,659 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107ec02db66049149efbc6b1d00d5c092a1_13fd071fb15e0e486dd456286374cf34 is 50, key is test_row_0/A:col10/1730989207532/Put/seqid=0 2024-11-07T14:20:07,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742412_1588 (size=12304) 2024-11-07T14:20:07,664 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:07,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989267662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:07,666 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:07,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989267663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:07,767 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:07,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989267765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:07,769 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:07,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989267767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:07,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-07T14:20:07,887 INFO [Thread-2359 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 161 completed 2024-11-07T14:20:07,888 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:20:07,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees 2024-11-07T14:20:07,889 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:20:07,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-07T14:20:07,890 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:20:07,890 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:20:07,933 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/104df67ddb4f4277948cc25c93311812 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/104df67ddb4f4277948cc25c93311812 2024-11-07T14:20:07,938 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 13fd071fb15e0e486dd456286374cf34/C of 13fd071fb15e0e486dd456286374cf34 into 104df67ddb4f4277948cc25c93311812(size=12.3 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:20:07,938 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:20:07,938 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34., storeName=13fd071fb15e0e486dd456286374cf34/C, priority=13, startTime=1730989207071; duration=0sec 2024-11-07T14:20:07,938 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:20:07,938 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 13fd071fb15e0e486dd456286374cf34:C 2024-11-07T14:20:07,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:07,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989267968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:07,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:07,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989267970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:07,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-07T14:20:08,042 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:20:08,042 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-07T14:20:08,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:08,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:20:08,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:08,043 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:08,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:08,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:08,063 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,066 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107ec02db66049149efbc6b1d00d5c092a1_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107ec02db66049149efbc6b1d00d5c092a1_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:08,067 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/6b0d300db74446dcadab68ed85d251f8, store: [table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:08,068 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/6b0d300db74446dcadab68ed85d251f8 is 175, key is test_row_0/A:col10/1730989207532/Put/seqid=0 2024-11-07T14:20:08,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742413_1589 (size=31105) 2024-11-07T14:20:08,195 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:20:08,196 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-07T14:20:08,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:08,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:20:08,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 
2024-11-07T14:20:08,196 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:08,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:08,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-07T14:20:08,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:08,275 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:08,275 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:08,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989268274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:08,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989268274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:08,348 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:20:08,348 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-07T14:20:08,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:08,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:20:08,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:08,349 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:08,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:08,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-07T14:20:08,477 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=237, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/6b0d300db74446dcadab68ed85d251f8
2024-11-07T14:20:08,484 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/782a0c7678c24410b67c4a41001bedb1 is 50, key is test_row_0/B:col10/1730989207532/Put/seqid=0
2024-11-07T14:20:08,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742414_1590 (size=12151)
2024-11-07T14:20:08,495 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/782a0c7678c24410b67c4a41001bedb1
2024-11-07T14:20:08,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163
2024-11-07T14:20:08,501 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081
2024-11-07T14:20:08,503 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164
2024-11-07T14:20:08,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.
2024-11-07T14:20:08,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing
2024-11-07T14:20:08,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.
2024-11-07T14:20:08,503 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164
java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-07T14:20:08,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164
java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-07T14:20:08,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=164
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-07T14:20:08,511 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/11be95cecf4a462c89c92896b81197fc is 50, key is test_row_0/C:col10/1730989207532/Put/seqid=0
2024-11-07T14:20:08,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742415_1591 (size=12151)
2024-11-07T14:20:08,520 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/11be95cecf4a462c89c92896b81197fc
2024-11-07T14:20:08,525 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/6b0d300db74446dcadab68ed85d251f8 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/6b0d300db74446dcadab68ed85d251f8
2024-11-07T14:20:08,530 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/6b0d300db74446dcadab68ed85d251f8, entries=150, sequenceid=237, filesize=30.4 K
2024-11-07T14:20:08,531 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/782a0c7678c24410b67c4a41001bedb1 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/782a0c7678c24410b67c4a41001bedb1
2024-11-07T14:20:08,534 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/782a0c7678c24410b67c4a41001bedb1, entries=150, sequenceid=237, filesize=11.9 K
2024-11-07T14:20:08,535 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/11be95cecf4a462c89c92896b81197fc as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/11be95cecf4a462c89c92896b81197fc
2024-11-07T14:20:08,538 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/11be95cecf4a462c89c92896b81197fc, entries=150, sequenceid=237, filesize=11.9 K
2024-11-07T14:20:08,539 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 13fd071fb15e0e486dd456286374cf34 in 886ms, sequenceid=237, compaction requested=true
2024-11-07T14:20:08,539 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 13fd071fb15e0e486dd456286374cf34:
2024-11-07T14:20:08,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 13fd071fb15e0e486dd456286374cf34:A, priority=-2147483648, current under compaction store size is 1
2024-11-07T14:20:08,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-07T14:20:08,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 13fd071fb15e0e486dd456286374cf34:B, priority=-2147483648, current under compaction store size is 2
2024-11-07T14:20:08,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-07T14:20:08,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 13fd071fb15e0e486dd456286374cf34:C, priority=-2147483648, current under compaction store size is 3
2024-11-07T14:20:08,539 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-07T14:20:08,539 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-07T14:20:08,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-07T14:20:08,540 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-07T14:20:08,540 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 13fd071fb15e0e486dd456286374cf34/B is initiating minor compaction (all files)
2024-11-07T14:20:08,540 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 13fd071fb15e0e486dd456286374cf34/B in TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.
2024-11-07T14:20:08,540 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/5f8319d706c84e7094c18ef674a14ad6, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/c6d155e0b7924366bda0560609f6c3c7, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/782a0c7678c24410b67c4a41001bedb1] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp, totalSize=36.0 K 2024-11-07T14:20:08,540 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93759 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:20:08,541 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): 13fd071fb15e0e486dd456286374cf34/A is initiating minor compaction (all files) 2024-11-07T14:20:08,541 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 13fd071fb15e0e486dd456286374cf34/A in TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:08,541 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/9752e718278a4ff287f1be9abb05ebbf, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/f35dd5d32cf94dcca1a8b89626004213, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/6b0d300db74446dcadab68ed85d251f8] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp, totalSize=91.6 K 2024-11-07T14:20:08,541 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:08,541 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 
files: [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/9752e718278a4ff287f1be9abb05ebbf, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/f35dd5d32cf94dcca1a8b89626004213, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/6b0d300db74446dcadab68ed85d251f8] 2024-11-07T14:20:08,541 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 5f8319d706c84e7094c18ef674a14ad6, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1730989204252 2024-11-07T14:20:08,541 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9752e718278a4ff287f1be9abb05ebbf, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1730989204252 2024-11-07T14:20:08,541 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting c6d155e0b7924366bda0560609f6c3c7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1730989205375 2024-11-07T14:20:08,542 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting f35dd5d32cf94dcca1a8b89626004213, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1730989205375 2024-11-07T14:20:08,542 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 782a0c7678c24410b67c4a41001bedb1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1730989207532 2024-11-07T14:20:08,542 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6b0d300db74446dcadab68ed85d251f8, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1730989207532 2024-11-07T14:20:08,547 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:08,549 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107dac8bc29e0b84bbd9f498237210c5179_13fd071fb15e0e486dd456286374cf34 store=[table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:08,549 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 13fd071fb15e0e486dd456286374cf34#B#compaction#497 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:20:08,549 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/31efc852b603489aad0438fe342a4048 is 50, key is test_row_0/B:col10/1730989207532/Put/seqid=0 2024-11-07T14:20:08,550 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107dac8bc29e0b84bbd9f498237210c5179_13fd071fb15e0e486dd456286374cf34, store=[table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:08,550 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107dac8bc29e0b84bbd9f498237210c5179_13fd071fb15e0e486dd456286374cf34 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:08,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742416_1592 (size=12697) 2024-11-07T14:20:08,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742417_1593 (size=4469) 2024-11-07T14:20:08,657 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:20:08,657 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-07T14:20:08,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 
2024-11-07T14:20:08,658 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing 13fd071fb15e0e486dd456286374cf34 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-07T14:20:08,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=A 2024-11-07T14:20:08,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:08,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=B 2024-11-07T14:20:08,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:08,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=C 2024-11-07T14:20:08,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:08,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411074e4ba717f6d74e6ab9ef17c900d754ad_13fd071fb15e0e486dd456286374cf34 is 50, key is test_row_0/A:col10/1730989207662/Put/seqid=0 2024-11-07T14:20:08,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742418_1594 (size=12304) 2024-11-07T14:20:08,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,674 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411074e4ba717f6d74e6ab9ef17c900d754ad_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411074e4ba717f6d74e6ab9ef17c900d754ad_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:08,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/e019c35d2a214ed1a47984a67e88fe5b, store: [table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:08,675 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/e019c35d2a214ed1a47984a67e88fe5b is 175, key is test_row_0/A:col10/1730989207662/Put/seqid=0 2024-11-07T14:20:08,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742419_1595 (size=31105) 2024-11-07T14:20:08,705 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=247, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/e019c35d2a214ed1a47984a67e88fe5b 2024-11-07T14:20:08,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/d5217bd8533342e087defcabf7b649e2 is 50, key is test_row_0/B:col10/1730989207662/Put/seqid=0 2024-11-07T14:20:08,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742420_1596 (size=12151) 2024-11-07T14:20:08,717 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/d5217bd8533342e087defcabf7b649e2 2024-11-07T14:20:08,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/c1b17779ad484d328f5a72d780e3cdc3 is 50, key is test_row_0/C:col10/1730989207662/Put/seqid=0 2024-11-07T14:20:08,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742421_1597 (size=12151) 2024-11-07T14:20:08,734 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/c1b17779ad484d328f5a72d780e3cdc3 2024-11-07T14:20:08,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/e019c35d2a214ed1a47984a67e88fe5b as 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/e019c35d2a214ed1a47984a67e88fe5b 2024-11-07T14:20:08,747 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/e019c35d2a214ed1a47984a67e88fe5b, entries=150, sequenceid=247, filesize=30.4 K 2024-11-07T14:20:08,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/d5217bd8533342e087defcabf7b649e2 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/d5217bd8533342e087defcabf7b649e2 2024-11-07T14:20:08,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,754 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/d5217bd8533342e087defcabf7b649e2, entries=150, sequenceid=247, filesize=11.9 K 2024-11-07T14:20:08,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/c1b17779ad484d328f5a72d780e3cdc3 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/c1b17779ad484d328f5a72d780e3cdc3 2024-11-07T14:20:08,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,774 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/c1b17779ad484d328f5a72d780e3cdc3, entries=150, sequenceid=247, filesize=11.9 K 2024-11-07T14:20:08,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,775 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=0 B/0 for 13fd071fb15e0e486dd456286374cf34 in 117ms, sequenceid=247, compaction requested=true 2024-11-07T14:20:08,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:20:08,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 
2024-11-07T14:20:08,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-11-07T14:20:08,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=164 2024-11-07T14:20:08,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,778 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-11-07T14:20:08,778 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 886 msec 2024-11-07T14:20:08,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,778 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,779 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 890 msec 2024-11-07T14:20:08,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,780 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,782 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,785 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,787 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,790 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,792 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,795 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,797 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,800 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,803 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,806 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,810 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,813 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,819 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,825 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:08,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,825 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 13fd071fb15e0e486dd456286374cf34 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-07T14:20:08,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=A 2024-11-07T14:20:08,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:08,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=B 2024-11-07T14:20:08,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:08,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=C 2024-11-07T14:20:08,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:08,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,842 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107c13fb3aa812b448eb593e2b301649441_13fd071fb15e0e486dd456286374cf34 is 50, key is test_row_0/A:col10/1730989208823/Put/seqid=0 2024-11-07T14:20:08,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742423_1599 (size=24858) 2024-11-07T14:20:08,878 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:08,881 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107c13fb3aa812b448eb593e2b301649441_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107c13fb3aa812b448eb593e2b301649441_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:08,882 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/1079a50b4677486aa535cb3442c53c8d, store: [table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:08,883 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/1079a50b4677486aa535cb3442c53c8d is 175, key is test_row_0/A:col10/1730989208823/Put/seqid=0 2024-11-07T14:20:08,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742422_1598 (size=74495) 2024-11-07T14:20:08,911 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:08,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989268909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:08,913 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:08,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989268911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:08,956 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 13fd071fb15e0e486dd456286374cf34#A#compaction#498 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:20:08,957 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/8d0a419da1324eb18f62eed423a37176 is 175, key is test_row_0/A:col10/1730989207532/Put/seqid=0 2024-11-07T14:20:08,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742424_1600 (size=31651) 2024-11-07T14:20:08,961 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/31efc852b603489aad0438fe342a4048 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/31efc852b603489aad0438fe342a4048 2024-11-07T14:20:08,966 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/8d0a419da1324eb18f62eed423a37176 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/8d0a419da1324eb18f62eed423a37176 2024-11-07T14:20:08,966 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 13fd071fb15e0e486dd456286374cf34/B of 13fd071fb15e0e486dd456286374cf34 into 31efc852b603489aad0438fe342a4048(size=12.4 K), total size for store is 24.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:20:08,966 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:20:08,966 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34., storeName=13fd071fb15e0e486dd456286374cf34/B, priority=13, startTime=1730989208539; duration=0sec 2024-11-07T14:20:08,966 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:20:08,966 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 13fd071fb15e0e486dd456286374cf34:B 2024-11-07T14:20:08,966 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:20:08,969 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:20:08,969 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 13fd071fb15e0e486dd456286374cf34/C is initiating minor compaction (all files) 2024-11-07T14:20:08,969 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 13fd071fb15e0e486dd456286374cf34/C in TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:08,969 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/104df67ddb4f4277948cc25c93311812, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/3908184bec384ce8aa40b88a5cb8c3c7, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/11be95cecf4a462c89c92896b81197fc, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/c1b17779ad484d328f5a72d780e3cdc3] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp, totalSize=47.9 K 2024-11-07T14:20:08,969 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 104df67ddb4f4277948cc25c93311812, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1730989204252 2024-11-07T14:20:08,970 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 3908184bec384ce8aa40b88a5cb8c3c7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1730989205375 2024-11-07T14:20:08,971 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 11be95cecf4a462c89c92896b81197fc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=237, earliestPutTs=1730989207532 2024-11-07T14:20:08,971 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting c1b17779ad484d328f5a72d780e3cdc3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1730989207658 2024-11-07T14:20:08,971 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 13fd071fb15e0e486dd456286374cf34/A of 13fd071fb15e0e486dd456286374cf34 into 8d0a419da1324eb18f62eed423a37176(size=30.9 K), total size for store is 61.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:20:08,971 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:20:08,971 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34., storeName=13fd071fb15e0e486dd456286374cf34/A, priority=13, startTime=1730989208539; duration=0sec 2024-11-07T14:20:08,971 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:20:08,971 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 13fd071fb15e0e486dd456286374cf34:A 2024-11-07T14:20:09,000 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 13fd071fb15e0e486dd456286374cf34#C#compaction#503 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:20:09,002 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/d193a308269045c2a9ad69e31082cd80 is 50, key is test_row_0/C:col10/1730989207662/Put/seqid=0 2024-11-07T14:20:09,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-07T14:20:09,004 INFO [Thread-2359 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-11-07T14:20:09,005 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:20:09,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees 2024-11-07T14:20:09,007 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:20:09,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-07T14:20:09,007 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:20:09,008 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:20:09,014 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:09,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989269012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:09,015 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:09,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989269014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:09,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742425_1601 (size=12731) 2024-11-07T14:20:09,022 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/d193a308269045c2a9ad69e31082cd80 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/d193a308269045c2a9ad69e31082cd80 2024-11-07T14:20:09,026 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 13fd071fb15e0e486dd456286374cf34/C of 13fd071fb15e0e486dd456286374cf34 into d193a308269045c2a9ad69e31082cd80(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:20:09,026 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:20:09,026 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34., storeName=13fd071fb15e0e486dd456286374cf34/C, priority=12, startTime=1730989208539; duration=0sec 2024-11-07T14:20:09,026 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:20:09,026 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 13fd071fb15e0e486dd456286374cf34:C 2024-11-07T14:20:09,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-07T14:20:09,159 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:20:09,159 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-07T14:20:09,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:09,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:20:09,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:09,159 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:20:09,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:09,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:09,218 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:09,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989269216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:09,218 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:09,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989269217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:09,294 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=258, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/1079a50b4677486aa535cb3442c53c8d 2024-11-07T14:20:09,300 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/4ee71a0f15b746a2a7e2eadba303cfa2 is 50, key is test_row_0/B:col10/1730989208823/Put/seqid=0 2024-11-07T14:20:09,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742426_1602 (size=12251) 2024-11-07T14:20:09,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-07T14:20:09,311 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:20:09,312 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-07T14:20:09,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:09,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:20:09,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 
2024-11-07T14:20:09,312 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:09,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:09,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:09,464 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:20:09,464 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-07T14:20:09,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:09,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:20:09,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:09,465 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:09,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:09,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:09,520 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:09,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989269519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:09,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:09,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989269520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:09,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-07T14:20:09,617 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:20:09,618 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-07T14:20:09,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:09,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:20:09,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:09,618 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:20:09,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:09,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:09,704 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/4ee71a0f15b746a2a7e2eadba303cfa2 2024-11-07T14:20:09,711 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/56ff5d94fd584634bc20cb855fa6cd8a is 50, key is test_row_0/C:col10/1730989208823/Put/seqid=0 2024-11-07T14:20:09,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742427_1603 (size=12251) 2024-11-07T14:20:09,715 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/56ff5d94fd584634bc20cb855fa6cd8a 2024-11-07T14:20:09,719 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/1079a50b4677486aa535cb3442c53c8d as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/1079a50b4677486aa535cb3442c53c8d 2024-11-07T14:20:09,722 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/1079a50b4677486aa535cb3442c53c8d, entries=400, sequenceid=258, filesize=72.7 K 2024-11-07T14:20:09,722 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/4ee71a0f15b746a2a7e2eadba303cfa2 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/4ee71a0f15b746a2a7e2eadba303cfa2 2024-11-07T14:20:09,725 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/4ee71a0f15b746a2a7e2eadba303cfa2, entries=150, sequenceid=258, filesize=12.0 K 2024-11-07T14:20:09,726 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/56ff5d94fd584634bc20cb855fa6cd8a as 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/56ff5d94fd584634bc20cb855fa6cd8a 2024-11-07T14:20:09,729 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/56ff5d94fd584634bc20cb855fa6cd8a, entries=150, sequenceid=258, filesize=12.0 K 2024-11-07T14:20:09,729 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 13fd071fb15e0e486dd456286374cf34 in 904ms, sequenceid=258, compaction requested=true 2024-11-07T14:20:09,729 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:20:09,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 13fd071fb15e0e486dd456286374cf34:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:20:09,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:20:09,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 13fd071fb15e0e486dd456286374cf34:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:20:09,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:20:09,730 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:20:09,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 13fd071fb15e0e486dd456286374cf34:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:20:09,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:20:09,730 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:20:09,730 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 137251 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:20:09,730 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:20:09,730 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): 13fd071fb15e0e486dd456286374cf34/A is initiating minor compaction (all files) 2024-11-07T14:20:09,730 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 13fd071fb15e0e486dd456286374cf34/B is initiating minor compaction (all files) 2024-11-07T14:20:09,731 INFO 
[RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 13fd071fb15e0e486dd456286374cf34/A in TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:09,731 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 13fd071fb15e0e486dd456286374cf34/B in TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:09,731 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/8d0a419da1324eb18f62eed423a37176, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/e019c35d2a214ed1a47984a67e88fe5b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/1079a50b4677486aa535cb3442c53c8d] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp, totalSize=134.0 K 2024-11-07T14:20:09,731 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/31efc852b603489aad0438fe342a4048, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/d5217bd8533342e087defcabf7b649e2, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/4ee71a0f15b746a2a7e2eadba303cfa2] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp, totalSize=36.2 K 2024-11-07T14:20:09,731 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:09,731 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 
files: [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/8d0a419da1324eb18f62eed423a37176, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/e019c35d2a214ed1a47984a67e88fe5b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/1079a50b4677486aa535cb3442c53c8d] 2024-11-07T14:20:09,731 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 31efc852b603489aad0438fe342a4048, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1730989207532 2024-11-07T14:20:09,731 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8d0a419da1324eb18f62eed423a37176, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1730989207532 2024-11-07T14:20:09,731 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting e019c35d2a214ed1a47984a67e88fe5b, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1730989207658 2024-11-07T14:20:09,732 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting d5217bd8533342e087defcabf7b649e2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1730989207658 2024-11-07T14:20:09,732 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1079a50b4677486aa535cb3442c53c8d, keycount=400, bloomtype=ROW, size=72.7 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1730989208801 2024-11-07T14:20:09,732 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ee71a0f15b746a2a7e2eadba303cfa2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1730989208815 2024-11-07T14:20:09,738 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:09,739 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 13fd071fb15e0e486dd456286374cf34#B#compaction#506 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:20:09,739 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/52ac7efb5ba3438f8e8c95c1ba36a179 is 50, key is test_row_0/B:col10/1730989208823/Put/seqid=0 2024-11-07T14:20:09,747 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107ddbd2f715f284683a385e50121709160_13fd071fb15e0e486dd456286374cf34 store=[table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:09,750 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107ddbd2f715f284683a385e50121709160_13fd071fb15e0e486dd456286374cf34, store=[table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:09,750 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107ddbd2f715f284683a385e50121709160_13fd071fb15e0e486dd456286374cf34 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:09,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742428_1604 (size=12899) 2024-11-07T14:20:09,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742429_1605 (size=4469) 2024-11-07T14:20:09,764 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 13fd071fb15e0e486dd456286374cf34#A#compaction#507 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:20:09,764 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/9c8874ada5384f5687f8670893a40e7a is 175, key is test_row_0/A:col10/1730989208823/Put/seqid=0 2024-11-07T14:20:09,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742430_1606 (size=31853) 2024-11-07T14:20:09,770 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:20:09,770 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-07T14:20:09,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 
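The FlushRegionCallable entries above are the region-server side of a master-driven table flush (pid=166 under FlushTableProcedure pid=165). As a point of reference only, a minimal client-side sketch of the call that typically triggers such a procedure, assuming a reachable cluster and the TestAcidGuarantees table from this log, is:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
        public static void main(String[] args) throws Exception {
            // Picks up hbase-site.xml from the classpath; the ZooKeeper quorum of
            // the test cluster would need to be configured (illustrative setup).
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Requests a flush of all column families (A, B, C in this log);
                // the master then schedules FlushTableProcedure/FlushRegionProcedure
                // subprocedures like pid=165/pid=166 seen above.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }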
2024-11-07T14:20:09,770 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing 13fd071fb15e0e486dd456286374cf34 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-07T14:20:09,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=A 2024-11-07T14:20:09,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:09,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=B 2024-11-07T14:20:09,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:09,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=C 2024-11-07T14:20:09,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:09,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411078e778a781ad44b1787de29bc89cbf55e_13fd071fb15e0e486dd456286374cf34 is 50, key is test_row_0/A:col10/1730989208905/Put/seqid=0 2024-11-07T14:20:09,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742431_1607 (size=12454) 2024-11-07T14:20:09,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:09,786 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411078e778a781ad44b1787de29bc89cbf55e_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411078e778a781ad44b1787de29bc89cbf55e_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:09,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/9617ac74737f49778b384c7d2aca8f27, store: [table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:09,787 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/9617ac74737f49778b384c7d2aca8f27 is 175, key is test_row_0/A:col10/1730989208905/Put/seqid=0 2024-11-07T14:20:09,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742432_1608 (size=31255) 2024-11-07T14:20:09,790 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=286, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/9617ac74737f49778b384c7d2aca8f27 2024-11-07T14:20:09,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/0678a367cb2b4df8b2e4cd74b60241c9 is 50, key is test_row_0/B:col10/1730989208905/Put/seqid=0 2024-11-07T14:20:09,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742433_1609 (size=12301) 2024-11-07T14:20:10,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:10,024 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:20:10,036 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:10,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989270034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:10,038 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:10,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989270036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:10,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-07T14:20:10,138 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:10,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989270137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:10,140 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:10,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989270139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:10,166 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/52ac7efb5ba3438f8e8c95c1ba36a179 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/52ac7efb5ba3438f8e8c95c1ba36a179 2024-11-07T14:20:10,170 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 13fd071fb15e0e486dd456286374cf34/B of 13fd071fb15e0e486dd456286374cf34 into 52ac7efb5ba3438f8e8c95c1ba36a179(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:20:10,170 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:20:10,170 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34., storeName=13fd071fb15e0e486dd456286374cf34/B, priority=13, startTime=1730989209730; duration=0sec 2024-11-07T14:20:10,170 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:20:10,170 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 13fd071fb15e0e486dd456286374cf34:B 2024-11-07T14:20:10,170 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-07T14:20:10,171 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-07T14:20:10,171 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 
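The "Need 3 to initiate" and "16 blocking" figures in the compaction-selection entries above come from the per-store compaction thresholds. A minimal sketch of where those knobs are read from, shown only for orientation and assuming the stock default values, is:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThresholds {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Minimum number of eligible store files before a minor compaction is
            // considered -- matches "Need 3 to initiate" in the log above.
            int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);
            // Store file count at which further flushes block writers -- matches
            // "16 blocking" reported by SortedCompactionPolicy above.
            int blockingFiles = conf.getInt("hbase.hstore.blockingStoreFiles", 16);
            System.out.println("compaction.min=" + minFiles
                    + " blockingStoreFiles=" + blockingFiles);
        }
    }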
2024-11-07T14:20:10,171 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. because compaction request was cancelled 2024-11-07T14:20:10,171 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 13fd071fb15e0e486dd456286374cf34:C 2024-11-07T14:20:10,171 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/9c8874ada5384f5687f8670893a40e7a as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/9c8874ada5384f5687f8670893a40e7a 2024-11-07T14:20:10,175 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 13fd071fb15e0e486dd456286374cf34/A of 13fd071fb15e0e486dd456286374cf34 into 9c8874ada5384f5687f8670893a40e7a(size=31.1 K), total size for store is 31.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:20:10,175 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:20:10,175 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34., storeName=13fd071fb15e0e486dd456286374cf34/A, priority=13, startTime=1730989209730; duration=0sec 2024-11-07T14:20:10,175 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:20:10,175 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 13fd071fb15e0e486dd456286374cf34:A 2024-11-07T14:20:10,200 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/0678a367cb2b4df8b2e4cd74b60241c9 2024-11-07T14:20:10,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/80cb156e2d0a414b87e03cf4e90c878c is 50, key is test_row_0/C:col10/1730989208905/Put/seqid=0 2024-11-07T14:20:10,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742434_1610 (size=12301) 2024-11-07T14:20:10,340 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:10,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989270339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:10,343 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:10,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989270342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:10,609 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/80cb156e2d0a414b87e03cf4e90c878c 2024-11-07T14:20:10,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/9617ac74737f49778b384c7d2aca8f27 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/9617ac74737f49778b384c7d2aca8f27 2024-11-07T14:20:10,616 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/9617ac74737f49778b384c7d2aca8f27, entries=150, sequenceid=286, filesize=30.5 K 2024-11-07T14:20:10,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/0678a367cb2b4df8b2e4cd74b60241c9 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/0678a367cb2b4df8b2e4cd74b60241c9 2024-11-07T14:20:10,620 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/0678a367cb2b4df8b2e4cd74b60241c9, entries=150, sequenceid=286, filesize=12.0 K 2024-11-07T14:20:10,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/80cb156e2d0a414b87e03cf4e90c878c as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/80cb156e2d0a414b87e03cf4e90c878c 2024-11-07T14:20:10,624 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/80cb156e2d0a414b87e03cf4e90c878c, entries=150, sequenceid=286, filesize=12.0 K 2024-11-07T14:20:10,625 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 13fd071fb15e0e486dd456286374cf34 in 855ms, sequenceid=286, compaction requested=true 2024-11-07T14:20:10,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:20:10,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:10,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-11-07T14:20:10,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=166 2024-11-07T14:20:10,628 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-11-07T14:20:10,628 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6190 sec 2024-11-07T14:20:10,629 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 1.6220 sec 2024-11-07T14:20:10,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:10,643 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 13fd071fb15e0e486dd456286374cf34 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-07T14:20:10,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=A 2024-11-07T14:20:10,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:10,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=B 2024-11-07T14:20:10,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:10,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
13fd071fb15e0e486dd456286374cf34, store=C 2024-11-07T14:20:10,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:10,653 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110764ba531cd4594dc08e8e02ff9265b8da_13fd071fb15e0e486dd456286374cf34 is 50, key is test_row_0/A:col10/1730989210033/Put/seqid=0 2024-11-07T14:20:10,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742435_1611 (size=12454) 2024-11-07T14:20:10,676 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:10,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989270674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:10,677 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:10,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989270675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:10,778 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:10,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989270777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:10,779 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:10,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989270778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:10,981 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:10,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989270979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:10,981 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:10,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989270980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:11,058 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:11,061 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024110764ba531cd4594dc08e8e02ff9265b8da_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110764ba531cd4594dc08e8e02ff9265b8da_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:11,062 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/924d74ab36a34124bcb158d99c8db89e, store: [table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:11,062 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/924d74ab36a34124bcb158d99c8db89e is 175, key is test_row_0/A:col10/1730989210033/Put/seqid=0 2024-11-07T14:20:11,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742436_1612 (size=31255) 2024-11-07T14:20:11,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking 
to see if procedure is done pid=165 2024-11-07T14:20:11,111 INFO [Thread-2359 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-11-07T14:20:11,112 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-07T14:20:11,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees 2024-11-07T14:20:11,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-07T14:20:11,114 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-07T14:20:11,115 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T14:20:11,115 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T14:20:11,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-07T14:20:11,266 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:20:11,266 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-07T14:20:11,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:11,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:20:11,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:11,267 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:11,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:11,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:11,283 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:11,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989271282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:11,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:11,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989271283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:11,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-07T14:20:11,419 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:20:11,419 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-07T14:20:11,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:11,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:20:11,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:11,419 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:20:11,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:11,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:11,467 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=300, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/924d74ab36a34124bcb158d99c8db89e 2024-11-07T14:20:11,473 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/d799429e45894a15a056641404180db0 is 50, key is test_row_0/B:col10/1730989210033/Put/seqid=0 2024-11-07T14:20:11,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742437_1613 (size=12301) 2024-11-07T14:20:11,571 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:20:11,571 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-07T14:20:11,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:11,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:20:11,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:11,572 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:20:11,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:11,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:11,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-07T14:20:11,724 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:20:11,724 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-07T14:20:11,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:11,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:20:11,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:11,725 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:11,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:11,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:11,787 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:11,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989271785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:11,787 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:11,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989271787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:11,876 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:20:11,877 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=300 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/d799429e45894a15a056641404180db0 2024-11-07T14:20:11,877 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-07T14:20:11,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:11,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:20:11,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:11,877 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:11,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:11,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:11,887 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/ceb63cb9709241c3a99117700271dbc9 is 50, key is test_row_0/C:col10/1730989210033/Put/seqid=0 2024-11-07T14:20:11,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742438_1614 (size=12301) 2024-11-07T14:20:12,029 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:20:12,029 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-07T14:20:12,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:12,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:20:12,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:12,030 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:20:12,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:12,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:12,181 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:20:12,182 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-07T14:20:12,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:12,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:20:12,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:12,182 ERROR [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:12,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T14:20:12,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:20:12,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-07T14:20:12,290 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=300 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/ceb63cb9709241c3a99117700271dbc9 2024-11-07T14:20:12,294 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/924d74ab36a34124bcb158d99c8db89e as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/924d74ab36a34124bcb158d99c8db89e 2024-11-07T14:20:12,297 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/924d74ab36a34124bcb158d99c8db89e, entries=150, sequenceid=300, filesize=30.5 K 2024-11-07T14:20:12,298 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/d799429e45894a15a056641404180db0 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/d799429e45894a15a056641404180db0 2024-11-07T14:20:12,301 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/d799429e45894a15a056641404180db0, entries=150, sequenceid=300, filesize=12.0 K 2024-11-07T14:20:12,302 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/ceb63cb9709241c3a99117700271dbc9 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/ceb63cb9709241c3a99117700271dbc9 2024-11-07T14:20:12,305 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/ceb63cb9709241c3a99117700271dbc9, entries=150, sequenceid=300, filesize=12.0 K 2024-11-07T14:20:12,305 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 13fd071fb15e0e486dd456286374cf34 in 1662ms, sequenceid=300, compaction requested=true 2024-11-07T14:20:12,306 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:20:12,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 13fd071fb15e0e486dd456286374cf34:A, priority=-2147483648, current under compaction store 
size is 1 2024-11-07T14:20:12,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:20:12,306 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:20:12,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 13fd071fb15e0e486dd456286374cf34:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:20:12,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:20:12,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 13fd071fb15e0e486dd456286374cf34:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:20:12,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:20:12,306 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:20:12,307 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37501 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:20:12,307 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94363 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:20:12,307 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 13fd071fb15e0e486dd456286374cf34/B is initiating minor compaction (all files) 2024-11-07T14:20:12,307 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): 13fd071fb15e0e486dd456286374cf34/A is initiating minor compaction (all files) 2024-11-07T14:20:12,307 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 13fd071fb15e0e486dd456286374cf34/B in TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:12,307 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 13fd071fb15e0e486dd456286374cf34/A in TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 
2024-11-07T14:20:12,307 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/52ac7efb5ba3438f8e8c95c1ba36a179, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/0678a367cb2b4df8b2e4cd74b60241c9, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/d799429e45894a15a056641404180db0] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp, totalSize=36.6 K 2024-11-07T14:20:12,307 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/9c8874ada5384f5687f8670893a40e7a, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/9617ac74737f49778b384c7d2aca8f27, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/924d74ab36a34124bcb158d99c8db89e] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp, totalSize=92.2 K 2024-11-07T14:20:12,307 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:12,307 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 
files: [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/9c8874ada5384f5687f8670893a40e7a, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/9617ac74737f49778b384c7d2aca8f27, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/924d74ab36a34124bcb158d99c8db89e] 2024-11-07T14:20:12,307 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 52ac7efb5ba3438f8e8c95c1ba36a179, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1730989208815 2024-11-07T14:20:12,307 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9c8874ada5384f5687f8670893a40e7a, keycount=150, bloomtype=ROW, size=31.1 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1730989208815 2024-11-07T14:20:12,307 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 0678a367cb2b4df8b2e4cd74b60241c9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1730989208905 2024-11-07T14:20:12,307 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9617ac74737f49778b384c7d2aca8f27, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1730989208905 2024-11-07T14:20:12,308 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting d799429e45894a15a056641404180db0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1730989210033 2024-11-07T14:20:12,308 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting 924d74ab36a34124bcb158d99c8db89e, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1730989210033 2024-11-07T14:20:12,314 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 13fd071fb15e0e486dd456286374cf34#B#compaction#514 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:20:12,314 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/8fb479f15a94448cb5b18a67e0cc8864 is 50, key is test_row_0/B:col10/1730989210033/Put/seqid=0 2024-11-07T14:20:12,315 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:12,318 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107f2d6f196c133412fb407bc8002cac46e_13fd071fb15e0e486dd456286374cf34 store=[table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:12,319 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107f2d6f196c133412fb407bc8002cac46e_13fd071fb15e0e486dd456286374cf34, store=[table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:12,319 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107f2d6f196c133412fb407bc8002cac46e_13fd071fb15e0e486dd456286374cf34 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:12,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742439_1615 (size=13051) 2024-11-07T14:20:12,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742440_1616 (size=4469) 2024-11-07T14:20:12,334 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:20:12,334 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45917 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-07T14:20:12,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 
2024-11-07T14:20:12,335 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2837): Flushing 13fd071fb15e0e486dd456286374cf34 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-07T14:20:12,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=A 2024-11-07T14:20:12,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:12,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=B 2024-11-07T14:20:12,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:12,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=C 2024-11-07T14:20:12,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:12,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107d045055044d14944a1a7a99d5e5a5467_13fd071fb15e0e486dd456286374cf34 is 50, key is test_row_0/A:col10/1730989210673/Put/seqid=0 2024-11-07T14:20:12,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742441_1617 (size=12454) 2024-11-07T14:20:12,724 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 13fd071fb15e0e486dd456286374cf34#A#compaction#515 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:20:12,724 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/a480244adad6445c97dab2035eff1aab is 175, key is test_row_0/A:col10/1730989210033/Put/seqid=0 2024-11-07T14:20:12,725 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/8fb479f15a94448cb5b18a67e0cc8864 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/8fb479f15a94448cb5b18a67e0cc8864 2024-11-07T14:20:12,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742442_1618 (size=32005) 2024-11-07T14:20:12,741 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 13fd071fb15e0e486dd456286374cf34/B of 13fd071fb15e0e486dd456286374cf34 into 8fb479f15a94448cb5b18a67e0cc8864(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:20:12,742 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:20:12,742 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34., storeName=13fd071fb15e0e486dd456286374cf34/B, priority=13, startTime=1730989212306; duration=0sec 2024-11-07T14:20:12,742 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:20:12,742 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 13fd071fb15e0e486dd456286374cf34:B 2024-11-07T14:20:12,742 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-07T14:20:12,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:12,744 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49584 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-07T14:20:12,744 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 13fd071fb15e0e486dd456286374cf34/C is initiating minor compaction (all files) 2024-11-07T14:20:12,744 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/a480244adad6445c97dab2035eff1aab as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/a480244adad6445c97dab2035eff1aab 2024-11-07T14:20:12,744 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 13fd071fb15e0e486dd456286374cf34/C in TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:12,744 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/d193a308269045c2a9ad69e31082cd80, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/56ff5d94fd584634bc20cb855fa6cd8a, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/80cb156e2d0a414b87e03cf4e90c878c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/ceb63cb9709241c3a99117700271dbc9] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp, totalSize=48.4 K 2024-11-07T14:20:12,745 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting d193a308269045c2a9ad69e31082cd80, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1730989207658 2024-11-07T14:20:12,745 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 56ff5d94fd584634bc20cb855fa6cd8a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1730989208815 2024-11-07T14:20:12,746 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 80cb156e2d0a414b87e03cf4e90c878c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1730989208905 2024-11-07T14:20:12,746 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting ceb63cb9709241c3a99117700271dbc9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1730989210033 2024-11-07T14:20:12,747 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107d045055044d14944a1a7a99d5e5a5467_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107d045055044d14944a1a7a99d5e5a5467_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:12,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/fd9daf884df2487cb9e2be9d71d5591e, store: [table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:12,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/fd9daf884df2487cb9e2be9d71d5591e is 175, key is test_row_0/A:col10/1730989210673/Put/seqid=0 2024-11-07T14:20:12,752 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 13fd071fb15e0e486dd456286374cf34/A of 13fd071fb15e0e486dd456286374cf34 into a480244adad6445c97dab2035eff1aab(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T14:20:12,752 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:20:12,752 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34., storeName=13fd071fb15e0e486dd456286374cf34/A, priority=13, startTime=1730989212306; duration=0sec 2024-11-07T14:20:12,752 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:20:12,752 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 13fd071fb15e0e486dd456286374cf34:A 2024-11-07T14:20:12,770 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 13fd071fb15e0e486dd456286374cf34#C#compaction#517 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:20:12,771 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/4041e8a151314dfbb9cab413d31d4b8e is 50, key is test_row_0/C:col10/1730989210033/Put/seqid=0 2024-11-07T14:20:12,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742444_1620 (size=13017) 2024-11-07T14:20:12,779 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/4041e8a151314dfbb9cab413d31d4b8e as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/4041e8a151314dfbb9cab413d31d4b8e 2024-11-07T14:20:12,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742443_1619 (size=31255) 2024-11-07T14:20:12,781 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=324, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/fd9daf884df2487cb9e2be9d71d5591e 2024-11-07T14:20:12,785 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 13fd071fb15e0e486dd456286374cf34/C of 13fd071fb15e0e486dd456286374cf34 into 4041e8a151314dfbb9cab413d31d4b8e(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T14:20:12,785 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:20:12,785 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34., storeName=13fd071fb15e0e486dd456286374cf34/C, priority=12, startTime=1730989212306; duration=0sec 2024-11-07T14:20:12,785 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:20:12,785 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 13fd071fb15e0e486dd456286374cf34:C 2024-11-07T14:20:12,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/1b628084663d487184b624699570d90b is 50, key is test_row_0/B:col10/1730989210673/Put/seqid=0 2024-11-07T14:20:12,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:12,790 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. as already flushing 2024-11-07T14:20:12,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742445_1621 (size=12301) 2024-11-07T14:20:12,830 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:12,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989272829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:12,830 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:12,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989272829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:12,932 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:12,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989272931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:12,933 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:12,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989272931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:13,135 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:13,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34892 deadline: 1730989273134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:13,135 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-07T14:20:13,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45917 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34870 deadline: 1730989273135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:13,193 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/1b628084663d487184b624699570d90b 2024-11-07T14:20:13,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/045ad4b53b4a4a1ca476d74afdc136b5 is 50, key is test_row_0/C:col10/1730989210673/Put/seqid=0 2024-11-07T14:20:13,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742446_1622 (size=12301) 2024-11-07T14:20:13,203 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/045ad4b53b4a4a1ca476d74afdc136b5 2024-11-07T14:20:13,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/fd9daf884df2487cb9e2be9d71d5591e as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/fd9daf884df2487cb9e2be9d71d5591e 2024-11-07T14:20:13,209 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/fd9daf884df2487cb9e2be9d71d5591e, entries=150, sequenceid=324, filesize=30.5 K 2024-11-07T14:20:13,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/1b628084663d487184b624699570d90b as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/1b628084663d487184b624699570d90b 2024-11-07T14:20:13,213 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/1b628084663d487184b624699570d90b, entries=150, sequenceid=324, filesize=12.0 K 2024-11-07T14:20:13,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/045ad4b53b4a4a1ca476d74afdc136b5 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/045ad4b53b4a4a1ca476d74afdc136b5 2024-11-07T14:20:13,216 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/045ad4b53b4a4a1ca476d74afdc136b5, entries=150, sequenceid=324, filesize=12.0 K 2024-11-07T14:20:13,216 INFO [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 13fd071fb15e0e486dd456286374cf34 in 882ms, sequenceid=324, compaction requested=false 2024-11-07T14:20:13,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:20:13,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 
2024-11-07T14:20:13,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/69430dbfd73f:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-11-07T14:20:13,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-11-07T14:20:13,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-07T14:20:13,219 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-11-07T14:20:13,219 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1030 sec 2024-11-07T14:20:13,220 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees in 2.1070 sec 2024-11-07T14:20:13,363 DEBUG [Thread-2360 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x11c440f7 to 127.0.0.1:51818 2024-11-07T14:20:13,363 DEBUG [Thread-2362 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x58460ef3 to 127.0.0.1:51818 2024-11-07T14:20:13,363 DEBUG [Thread-2360 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:20:13,363 DEBUG [Thread-2362 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:20:13,364 DEBUG [Thread-2368 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x15b6349f to 127.0.0.1:51818 2024-11-07T14:20:13,364 DEBUG [Thread-2368 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:20:13,365 DEBUG [Thread-2364 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6e8cd1ae to 127.0.0.1:51818 2024-11-07T14:20:13,365 DEBUG [Thread-2366 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4d832d43 to 127.0.0.1:51818 2024-11-07T14:20:13,365 DEBUG [Thread-2364 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:20:13,365 DEBUG [Thread-2366 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:20:13,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45917 {}] regionserver.HRegion(8581): Flush requested on 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:13,437 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 13fd071fb15e0e486dd456286374cf34 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-07T14:20:13,438 DEBUG [Thread-2355 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x091d72db to 127.0.0.1:51818 2024-11-07T14:20:13,438 DEBUG [Thread-2355 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:20:13,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=A 2024-11-07T14:20:13,438 DEBUG [Thread-2351 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x11a52cdf to 127.0.0.1:51818 2024-11-07T14:20:13,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:13,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=B 2024-11-07T14:20:13,438 DEBUG [Thread-2351 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:20:13,438 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:13,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=C 2024-11-07T14:20:13,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:13,443 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411076d336df2d5a3453696cbb88eb0476611_13fd071fb15e0e486dd456286374cf34 is 50, key is test_row_0/A:col10/1730989213437/Put/seqid=0 2024-11-07T14:20:13,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742447_1623 (size=12454) 2024-11-07T14:20:13,798 DEBUG [Thread-2357 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5d836f78 to 127.0.0.1:51818 2024-11-07T14:20:13,798 DEBUG [Thread-2357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:20:13,842 DEBUG [Thread-2353 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2d7fe431 to 127.0.0.1:51818 2024-11-07T14:20:13,842 DEBUG [Thread-2353 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:20:13,846 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:13,848 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411076d336df2d5a3453696cbb88eb0476611_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411076d336df2d5a3453696cbb88eb0476611_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:13,849 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/d8053858a70846aabc9912abfe897f82, store: [table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:13,850 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/d8053858a70846aabc9912abfe897f82 is 175, key is test_row_0/A:col10/1730989213437/Put/seqid=0 2024-11-07T14:20:13,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742448_1624 (size=31255) 2024-11-07T14:20:13,875 DEBUG [Thread-2349 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5c9b5141 to 127.0.0.1:51818 2024-11-07T14:20:13,875 DEBUG [Thread-2349 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:20:14,253 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=342, memsize=26.8 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/d8053858a70846aabc9912abfe897f82 2024-11-07T14:20:14,259 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/e96727d46eda4145b6e36e8b1353ca30 is 50, key is test_row_0/B:col10/1730989213437/Put/seqid=0 2024-11-07T14:20:14,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742449_1625 (size=12301) 2024-11-07T14:20:14,662 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/e96727d46eda4145b6e36e8b1353ca30 2024-11-07T14:20:14,667 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/ed79296abede4ef9aa6c8e7f626324d8 is 50, key is test_row_0/C:col10/1730989213437/Put/seqid=0 2024-11-07T14:20:14,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742450_1626 (size=12301) 2024-11-07T14:20:15,071 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/ed79296abede4ef9aa6c8e7f626324d8 2024-11-07T14:20:15,074 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/d8053858a70846aabc9912abfe897f82 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/d8053858a70846aabc9912abfe897f82 2024-11-07T14:20:15,077 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/d8053858a70846aabc9912abfe897f82, entries=150, sequenceid=342, filesize=30.5 K 2024-11-07T14:20:15,077 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/e96727d46eda4145b6e36e8b1353ca30 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/e96727d46eda4145b6e36e8b1353ca30 2024-11-07T14:20:15,079 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/e96727d46eda4145b6e36e8b1353ca30, entries=150, 
sequenceid=342, filesize=12.0 K 2024-11-07T14:20:15,080 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/ed79296abede4ef9aa6c8e7f626324d8 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/ed79296abede4ef9aa6c8e7f626324d8 2024-11-07T14:20:15,082 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/ed79296abede4ef9aa6c8e7f626324d8, entries=150, sequenceid=342, filesize=12.0 K 2024-11-07T14:20:15,083 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=20.13 KB/20610 for 13fd071fb15e0e486dd456286374cf34 in 1645ms, sequenceid=342, compaction requested=true 2024-11-07T14:20:15,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:20:15,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 13fd071fb15e0e486dd456286374cf34:A, priority=-2147483648, current under compaction store size is 1 2024-11-07T14:20:15,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:20:15,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 13fd071fb15e0e486dd456286374cf34:B, priority=-2147483648, current under compaction store size is 2 2024-11-07T14:20:15,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:20:15,083 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:20:15,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 13fd071fb15e0e486dd456286374cf34:C, priority=-2147483648, current under compaction store size is 3 2024-11-07T14:20:15,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:20:15,083 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T14:20:15,083 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:20:15,083 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94515 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T14:20:15,083 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1540): 
13fd071fb15e0e486dd456286374cf34/B is initiating minor compaction (all files) 2024-11-07T14:20:15,083 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1540): 13fd071fb15e0e486dd456286374cf34/A is initiating minor compaction (all files) 2024-11-07T14:20:15,083 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 13fd071fb15e0e486dd456286374cf34/B in TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:15,084 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 13fd071fb15e0e486dd456286374cf34/A in TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:15,084 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/8fb479f15a94448cb5b18a67e0cc8864, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/1b628084663d487184b624699570d90b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/e96727d46eda4145b6e36e8b1353ca30] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp, totalSize=36.8 K 2024-11-07T14:20:15,084 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/a480244adad6445c97dab2035eff1aab, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/fd9daf884df2487cb9e2be9d71d5591e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/d8053858a70846aabc9912abfe897f82] into tmpdir=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp, totalSize=92.3 K 2024-11-07T14:20:15,084 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:15,084 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 
files: [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/a480244adad6445c97dab2035eff1aab, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/fd9daf884df2487cb9e2be9d71d5591e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/d8053858a70846aabc9912abfe897f82] 2024-11-07T14:20:15,084 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 8fb479f15a94448cb5b18a67e0cc8864, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1730989210033 2024-11-07T14:20:15,084 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting a480244adad6445c97dab2035eff1aab, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1730989210033 2024-11-07T14:20:15,084 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b628084663d487184b624699570d90b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1730989210670 2024-11-07T14:20:15,084 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] compactions.Compactor(224): Compacting e96727d46eda4145b6e36e8b1353ca30, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1730989212802 2024-11-07T14:20:15,084 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd9daf884df2487cb9e2be9d71d5591e, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1730989210670 2024-11-07T14:20:15,084 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8053858a70846aabc9912abfe897f82, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1730989212802 2024-11-07T14:20:15,090 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 13fd071fb15e0e486dd456286374cf34#B#compaction#523 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:20:15,090 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/8180d989106844fcb1e8d853f5a96cce is 50, key is test_row_0/B:col10/1730989213437/Put/seqid=0 2024-11-07T14:20:15,093 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:15,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742451_1627 (size=13153) 2024-11-07T14:20:15,095 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241107f98344c9e2774976b5529c123a72ca75_13fd071fb15e0e486dd456286374cf34 store=[table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:15,109 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241107f98344c9e2774976b5529c123a72ca75_13fd071fb15e0e486dd456286374cf34, store=[table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:15,109 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107f98344c9e2774976b5529c123a72ca75_13fd071fb15e0e486dd456286374cf34 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:15,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742452_1628 (size=4469) 2024-11-07T14:20:15,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-07T14:20:15,218 INFO [Thread-2359 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-11-07T14:20:15,219 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-07T14:20:15,219 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 23 2024-11-07T14:20:15,219 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 103 2024-11-07T14:20:15,219 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 17 2024-11-07T14:20:15,219 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 106 2024-11-07T14:20:15,219 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 18 2024-11-07T14:20:15,219 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-07T14:20:15,219 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7245 2024-11-07T14:20:15,219 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7163 2024-11-07T14:20:15,219 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7169 2024-11-07T14:20:15,219 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7248 2024-11-07T14:20:15,219 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7169 2024-11-07T14:20:15,219 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-07T14:20:15,219 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-07T14:20:15,219 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0644b7e6 to 127.0.0.1:51818 2024-11-07T14:20:15,219 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:20:15,219 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-07T14:20:15,220 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-07T14:20:15,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=169, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-07T14:20:15,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-07T14:20:15,222 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730989215222"}]},"ts":"1730989215222"} 2024-11-07T14:20:15,223 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-07T14:20:15,225 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-07T14:20:15,225 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-07T14:20:15,226 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=171, ppid=170, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=13fd071fb15e0e486dd456286374cf34, UNASSIGN}] 2024-11-07T14:20:15,227 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=171, ppid=170, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=13fd071fb15e0e486dd456286374cf34, UNASSIGN 2024-11-07T14:20:15,227 INFO [PEWorker-5 
{}] assignment.RegionStateStore(202): pid=171 updating hbase:meta row=13fd071fb15e0e486dd456286374cf34, regionState=CLOSING, regionLocation=69430dbfd73f,45917,1730989044081 2024-11-07T14:20:15,228 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-07T14:20:15,228 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE; CloseRegionProcedure 13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081}] 2024-11-07T14:20:15,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-07T14:20:15,379 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 69430dbfd73f,45917,1730989044081 2024-11-07T14:20:15,379 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(124): Close 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:15,379 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-07T14:20:15,379 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1681): Closing 13fd071fb15e0e486dd456286374cf34, disabling compactions & flushes 2024-11-07T14:20:15,379 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1942): waiting for 2 compactions to complete for region TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:15,498 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/8180d989106844fcb1e8d853f5a96cce as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/8180d989106844fcb1e8d853f5a96cce 2024-11-07T14:20:15,501 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 13fd071fb15e0e486dd456286374cf34/B of 13fd071fb15e0e486dd456286374cf34 into 8180d989106844fcb1e8d853f5a96cce(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
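For reference, the disable recorded above ("Started disable of TestAcidGuarantees", handled by the master as DisableTableProcedure pid=169 with the CloseTableRegions/TransitRegionState children pid=170-172) is what a client issues through the standard HBase Admin API. A minimal sketch follows; the ZooKeeper quorum and client port are assumptions copied from the 127.0.0.1:51818 connection logged above and would differ in another run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumption: quorum/port taken from the ReadOnlyZKClient entry above (127.0.0.1:51818).
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "51818");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          if (admin.isTableEnabled(table)) {
            // The master turns this RPC into a DisableTableProcedure and its
            // CloseTableRegionsProcedure / TransitRegionStateProcedure children,
            // as seen in the pid=169..172 entries above.
            admin.disableTable(table);
          }
        }
      }
    }
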
2024-11-07T14:20:15,501 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:20:15,501 INFO [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34., storeName=13fd071fb15e0e486dd456286374cf34/B, priority=13, startTime=1730989215083; duration=0sec 2024-11-07T14:20:15,501 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-07T14:20:15,501 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 13fd071fb15e0e486dd456286374cf34:B 2024-11-07T14:20:15,501 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. because compaction request was cancelled 2024-11-07T14:20:15,501 DEBUG [RS:0;69430dbfd73f:45917-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 13fd071fb15e0e486dd456286374cf34:C 2024-11-07T14:20:15,513 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 13fd071fb15e0e486dd456286374cf34#A#compaction#524 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T14:20:15,514 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/5b846b29039546b1b9d1e1d95556bf8f is 175, key is test_row_0/A:col10/1730989213437/Put/seqid=0 2024-11-07T14:20:15,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742453_1629 (size=32107) 2024-11-07T14:20:15,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-07T14:20:15,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-07T14:20:15,920 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/5b846b29039546b1b9d1e1d95556bf8f as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/5b846b29039546b1b9d1e1d95556bf8f 2024-11-07T14:20:15,924 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 13fd071fb15e0e486dd456286374cf34/A of 13fd071fb15e0e486dd456286374cf34 into 5b846b29039546b1b9d1e1d95556bf8f(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
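The HRegionFileSystem "Committing ... as ..." entries above record the last step of a compaction: the newly written file is moved out of the region's .tmp directory into the store (column family) directory. Below is a conceptual sketch of that move using the plain Hadoop FileSystem API, mirroring the A-store paths above; it illustrates the effect only and is not HBase's own commit code.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class CommitCompactedFileSketch {
      public static void main(String[] args) throws Exception {
        // Paths copied from the compaction entries above; the NameNode URI and
        // test-data directory are specific to this particular run.
        String region = "hdfs://localhost:34807/user/jenkins/test-data/"
            + "682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/"
            + "13fd071fb15e0e486dd456286374cf34";
        Path tmp = new Path(region + "/.tmp/A/5b846b29039546b1b9d1e1d95556bf8f");
        Path dst = new Path(region + "/A/5b846b29039546b1b9d1e1d95556bf8f");

        FileSystem fs = FileSystem.get(URI.create(region), new Configuration());
        // Conceptually, "committing" the compacted file amounts to renaming it out of
        // the region's .tmp directory into the store (column family) directory.
        boolean moved = fs.rename(tmp, dst);
        System.out.println("committed: " + moved);
      }
    }
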
2024-11-07T14:20:15,924 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:20:15,924 INFO [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34., storeName=13fd071fb15e0e486dd456286374cf34/A, priority=13, startTime=1730989215083; duration=0sec 2024-11-07T14:20:15,924 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T14:20:15,924 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:15,924 DEBUG [RS:0;69430dbfd73f:45917-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 13fd071fb15e0e486dd456286374cf34:A 2024-11-07T14:20:15,924 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 2024-11-07T14:20:15,924 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. after waiting 0 ms 2024-11-07T14:20:15,924 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 
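In the closing flush that follows, store A is flushed through HMobStore / DefaultMobStoreFlusher into the mobdir while B and C use the default flusher, which indicates that family A is MOB-enabled. A sketch of how such a family is typically declared is shown below; the threshold value is hypothetical, since the table's actual MOB settings are not visible in this log.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilySketch {
      public static TableDescriptor build() {
        // Hypothetical MOB threshold; the log only shows that family A is flushed
        // through HMobStore into the mobdir, not the configured values.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                .setMobEnabled(true)     // cells above the threshold go to MOB files
                .setMobThreshold(10L)    // bytes; hypothetical for this sketch
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
            .build();
      }
    }
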
2024-11-07T14:20:15,924 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(2837): Flushing 13fd071fb15e0e486dd456286374cf34 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-07T14:20:15,925 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=A 2024-11-07T14:20:15,925 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:15,925 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=B 2024-11-07T14:20:15,925 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:15,925 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 13fd071fb15e0e486dd456286374cf34, store=C 2024-11-07T14:20:15,925 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-07T14:20:15,929 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107670236f5806b4d0d96cd2498741d4797_13fd071fb15e0e486dd456286374cf34 is 50, key is test_row_0/A:col10/1730989213874/Put/seqid=0 2024-11-07T14:20:15,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742454_1630 (size=9914) 2024-11-07T14:20:16,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-07T14:20:16,333 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T14:20:16,335 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241107670236f5806b4d0d96cd2498741d4797_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107670236f5806b4d0d96cd2498741d4797_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:16,336 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/e4c264e8c3b548f8aa691a6f018dbcdf, store: [table=TestAcidGuarantees family=A region=13fd071fb15e0e486dd456286374cf34] 2024-11-07T14:20:16,336 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/e4c264e8c3b548f8aa691a6f018dbcdf is 175, key is test_row_0/A:col10/1730989213874/Put/seqid=0 2024-11-07T14:20:16,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742455_1631 (size=22561) 2024-11-07T14:20:16,740 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=350, memsize=6.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/e4c264e8c3b548f8aa691a6f018dbcdf 2024-11-07T14:20:16,745 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/f10dee2871d542719747d6665aee3691 is 50, key is test_row_0/B:col10/1730989213874/Put/seqid=0 2024-11-07T14:20:16,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742456_1632 (size=9857) 2024-11-07T14:20:17,148 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=350 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/f10dee2871d542719747d6665aee3691 2024-11-07T14:20:17,153 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/7749e1da2d9a49ca91d45bbab419db98 is 50, key is test_row_0/C:col10/1730989213874/Put/seqid=0 2024-11-07T14:20:17,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742457_1633 (size=9857) 2024-11-07T14:20:17,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-07T14:20:17,557 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=350 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/7749e1da2d9a49ca91d45bbab419db98 2024-11-07T14:20:17,560 DEBUG 
[RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/A/e4c264e8c3b548f8aa691a6f018dbcdf as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/e4c264e8c3b548f8aa691a6f018dbcdf 2024-11-07T14:20:17,562 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/e4c264e8c3b548f8aa691a6f018dbcdf, entries=100, sequenceid=350, filesize=22.0 K 2024-11-07T14:20:17,563 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/B/f10dee2871d542719747d6665aee3691 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/f10dee2871d542719747d6665aee3691 2024-11-07T14:20:17,565 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/f10dee2871d542719747d6665aee3691, entries=100, sequenceid=350, filesize=9.6 K 2024-11-07T14:20:17,566 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/.tmp/C/7749e1da2d9a49ca91d45bbab419db98 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/7749e1da2d9a49ca91d45bbab419db98 2024-11-07T14:20:17,568 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/7749e1da2d9a49ca91d45bbab419db98, entries=100, sequenceid=350, filesize=9.6 K 2024-11-07T14:20:17,569 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for 13fd071fb15e0e486dd456286374cf34 in 1645ms, sequenceid=350, compaction requested=true 2024-11-07T14:20:17,570 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/a5b28bc8cc1d4e1fa5b5fbf82192fca4, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/ab6f38b72f424f9699fd2f8f8dd51122, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/62d3ecef3cf14f4a933d0ab3ef27a972, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/d2a737d0f3c64173a040453880c63485, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/dec78d320ff24ac0b89321e93e85b404, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/deb77834d8a447108f547afcbfe092d8, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/bc4916e685174350974c61b2f02ab84b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/932a8cfbf8db42f887017f3ef8f000aa, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/4e433c23ebb74097b7a4837f848dc3c7, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/5e9563736f5847a99f2b2a1aa995c3f7, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/37a32ce505564553be528c624ced15a8, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/25dbd238b1144abab70d5d02266e3aee, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/6b88ae5e2bb14e929c9bf71f780d0643, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/9752e718278a4ff287f1be9abb05ebbf, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/f35dd5d32cf94dcca1a8b89626004213, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/8d0a419da1324eb18f62eed423a37176, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/6b0d300db74446dcadab68ed85d251f8, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/e019c35d2a214ed1a47984a67e88fe5b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/1079a50b4677486aa535cb3442c53c8d, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/9c8874ada5384f5687f8670893a40e7a, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/9617ac74737f49778b384c7d2aca8f27, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/a480244adad6445c97dab2035eff1aab, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/924d74ab36a34124bcb158d99c8db89e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/fd9daf884df2487cb9e2be9d71d5591e, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/d8053858a70846aabc9912abfe897f82] to archive 2024-11-07T14:20:17,570 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-07T14:20:17,572 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/a5b28bc8cc1d4e1fa5b5fbf82192fca4 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/a5b28bc8cc1d4e1fa5b5fbf82192fca4 2024-11-07T14:20:17,573 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/ab6f38b72f424f9699fd2f8f8dd51122 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/ab6f38b72f424f9699fd2f8f8dd51122 2024-11-07T14:20:17,573 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/62d3ecef3cf14f4a933d0ab3ef27a972 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/62d3ecef3cf14f4a933d0ab3ef27a972 2024-11-07T14:20:17,574 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/d2a737d0f3c64173a040453880c63485 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/d2a737d0f3c64173a040453880c63485 2024-11-07T14:20:17,575 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/dec78d320ff24ac0b89321e93e85b404 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/dec78d320ff24ac0b89321e93e85b404 2024-11-07T14:20:17,576 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/deb77834d8a447108f547afcbfe092d8 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/deb77834d8a447108f547afcbfe092d8 2024-11-07T14:20:17,577 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/bc4916e685174350974c61b2f02ab84b to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/bc4916e685174350974c61b2f02ab84b 2024-11-07T14:20:17,578 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/932a8cfbf8db42f887017f3ef8f000aa to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/932a8cfbf8db42f887017f3ef8f000aa 2024-11-07T14:20:17,579 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/4e433c23ebb74097b7a4837f848dc3c7 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/4e433c23ebb74097b7a4837f848dc3c7 2024-11-07T14:20:17,579 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/5e9563736f5847a99f2b2a1aa995c3f7 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/5e9563736f5847a99f2b2a1aa995c3f7 2024-11-07T14:20:17,580 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/37a32ce505564553be528c624ced15a8 to 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/37a32ce505564553be528c624ced15a8 2024-11-07T14:20:17,581 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/25dbd238b1144abab70d5d02266e3aee to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/25dbd238b1144abab70d5d02266e3aee 2024-11-07T14:20:17,582 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/6b88ae5e2bb14e929c9bf71f780d0643 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/6b88ae5e2bb14e929c9bf71f780d0643 2024-11-07T14:20:17,583 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/9752e718278a4ff287f1be9abb05ebbf to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/9752e718278a4ff287f1be9abb05ebbf 2024-11-07T14:20:17,583 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/f35dd5d32cf94dcca1a8b89626004213 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/f35dd5d32cf94dcca1a8b89626004213 2024-11-07T14:20:17,584 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/8d0a419da1324eb18f62eed423a37176 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/8d0a419da1324eb18f62eed423a37176 2024-11-07T14:20:17,585 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/6b0d300db74446dcadab68ed85d251f8 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/6b0d300db74446dcadab68ed85d251f8 2024-11-07T14:20:17,585 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/e019c35d2a214ed1a47984a67e88fe5b to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/e019c35d2a214ed1a47984a67e88fe5b 2024-11-07T14:20:17,586 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/1079a50b4677486aa535cb3442c53c8d to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/1079a50b4677486aa535cb3442c53c8d 2024-11-07T14:20:17,587 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/9c8874ada5384f5687f8670893a40e7a to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/9c8874ada5384f5687f8670893a40e7a 2024-11-07T14:20:17,588 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/9617ac74737f49778b384c7d2aca8f27 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/9617ac74737f49778b384c7d2aca8f27 2024-11-07T14:20:17,589 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/a480244adad6445c97dab2035eff1aab to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/a480244adad6445c97dab2035eff1aab 2024-11-07T14:20:17,590 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/924d74ab36a34124bcb158d99c8db89e to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/924d74ab36a34124bcb158d99c8db89e 2024-11-07T14:20:17,591 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/fd9daf884df2487cb9e2be9d71d5591e to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/fd9daf884df2487cb9e2be9d71d5591e 2024-11-07T14:20:17,591 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/d8053858a70846aabc9912abfe897f82 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/d8053858a70846aabc9912abfe897f82 2024-11-07T14:20:17,592 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/963dbf1d240c427f84d4663b0bdc6f49, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/103f8515cc724845a229d208f063cd89, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/070a4a959a784efab399cc208e554226, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/479c113150ea4f6db5fbf4b168e4a794, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/4158398b681749e397a178b7ae97b3f3, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/88e8b06ab4fe4a33b66ecb17bc15fe82, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/70eea9d79eb14e62b6dc119f50bd4218, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/65b911bdaffe41919bcf2cc093f8c8d7, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/60b37977c5e14301a88555bb53024306, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/cabc3b0067da4f3cb67d4950a2ea8bc7, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/f68e9f0b3afe4b209acca31f42d801d9, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/f8cb80313f894dbdbb9cdfaed2ac9444, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/5f8319d706c84e7094c18ef674a14ad6, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/84c1d039de014f6a846ebbea0121cde4, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/c6d155e0b7924366bda0560609f6c3c7, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/31efc852b603489aad0438fe342a4048, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/782a0c7678c24410b67c4a41001bedb1, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/d5217bd8533342e087defcabf7b649e2, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/52ac7efb5ba3438f8e8c95c1ba36a179, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/4ee71a0f15b746a2a7e2eadba303cfa2, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/0678a367cb2b4df8b2e4cd74b60241c9, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/8fb479f15a94448cb5b18a67e0cc8864, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/d799429e45894a15a056641404180db0, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/1b628084663d487184b624699570d90b, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/e96727d46eda4145b6e36e8b1353ca30] to archive 2024-11-07T14:20:17,593 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
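The HFileArchiver entries above and below all follow the same pattern: a compacted store file keeps its data/<namespace>/<table>/<region>/<family>/<file> suffix and is re-rooted under the cluster's archive directory. The sketch below reproduces that mapping at the string level for the default-namespace paths in this log; it is illustrative only, not the HFileArchiver implementation.

    public class ArchivePathSketch {
      // Re-roots a default-namespace store file path under <rootdir>/archive,
      // matching the "Archived from FileableStoreFile, <data path> to <archive path>"
      // entries in this log. General rule: "archive/" is inserted before "data/<namespace>/".
      static String archiveLocation(String storeFilePath) {
        int idx = storeFilePath.indexOf("/data/default/");
        if (idx < 0) {
          throw new IllegalArgumentException("not a default-namespace store file: " + storeFilePath);
        }
        return storeFilePath.substring(0, idx) + "/archive" + storeFilePath.substring(idx);
      }

      public static void main(String[] args) {
        String src = "hdfs://localhost:34807/user/jenkins/test-data/"
            + "682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/"
            + "13fd071fb15e0e486dd456286374cf34/B/963dbf1d240c427f84d4663b0bdc6f49";
        // Prints the same archive path that appears in the corresponding entry below.
        System.out.println(archiveLocation(src));
      }
    }
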
2024-11-07T14:20:17,594 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/963dbf1d240c427f84d4663b0bdc6f49 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/963dbf1d240c427f84d4663b0bdc6f49 2024-11-07T14:20:17,595 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/103f8515cc724845a229d208f063cd89 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/103f8515cc724845a229d208f063cd89 2024-11-07T14:20:17,596 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/070a4a959a784efab399cc208e554226 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/070a4a959a784efab399cc208e554226 2024-11-07T14:20:17,597 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/479c113150ea4f6db5fbf4b168e4a794 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/479c113150ea4f6db5fbf4b168e4a794 2024-11-07T14:20:17,597 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/4158398b681749e397a178b7ae97b3f3 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/4158398b681749e397a178b7ae97b3f3 2024-11-07T14:20:17,598 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/88e8b06ab4fe4a33b66ecb17bc15fe82 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/88e8b06ab4fe4a33b66ecb17bc15fe82 2024-11-07T14:20:17,599 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/70eea9d79eb14e62b6dc119f50bd4218 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/70eea9d79eb14e62b6dc119f50bd4218 2024-11-07T14:20:17,600 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/65b911bdaffe41919bcf2cc093f8c8d7 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/65b911bdaffe41919bcf2cc093f8c8d7 2024-11-07T14:20:17,601 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/60b37977c5e14301a88555bb53024306 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/60b37977c5e14301a88555bb53024306 2024-11-07T14:20:17,601 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/cabc3b0067da4f3cb67d4950a2ea8bc7 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/cabc3b0067da4f3cb67d4950a2ea8bc7 2024-11-07T14:20:17,602 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/f68e9f0b3afe4b209acca31f42d801d9 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/f68e9f0b3afe4b209acca31f42d801d9 2024-11-07T14:20:17,603 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/f8cb80313f894dbdbb9cdfaed2ac9444 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/f8cb80313f894dbdbb9cdfaed2ac9444 2024-11-07T14:20:17,604 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/5f8319d706c84e7094c18ef674a14ad6 to 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/5f8319d706c84e7094c18ef674a14ad6 2024-11-07T14:20:17,605 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/84c1d039de014f6a846ebbea0121cde4 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/84c1d039de014f6a846ebbea0121cde4 2024-11-07T14:20:17,605 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/c6d155e0b7924366bda0560609f6c3c7 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/c6d155e0b7924366bda0560609f6c3c7 2024-11-07T14:20:17,606 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/31efc852b603489aad0438fe342a4048 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/31efc852b603489aad0438fe342a4048 2024-11-07T14:20:17,607 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/782a0c7678c24410b67c4a41001bedb1 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/782a0c7678c24410b67c4a41001bedb1 2024-11-07T14:20:17,608 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/d5217bd8533342e087defcabf7b649e2 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/d5217bd8533342e087defcabf7b649e2 2024-11-07T14:20:17,609 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/52ac7efb5ba3438f8e8c95c1ba36a179 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/52ac7efb5ba3438f8e8c95c1ba36a179 2024-11-07T14:20:17,609 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/4ee71a0f15b746a2a7e2eadba303cfa2 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/4ee71a0f15b746a2a7e2eadba303cfa2 2024-11-07T14:20:17,610 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/0678a367cb2b4df8b2e4cd74b60241c9 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/0678a367cb2b4df8b2e4cd74b60241c9 2024-11-07T14:20:17,611 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/8fb479f15a94448cb5b18a67e0cc8864 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/8fb479f15a94448cb5b18a67e0cc8864 2024-11-07T14:20:17,612 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/d799429e45894a15a056641404180db0 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/d799429e45894a15a056641404180db0 2024-11-07T14:20:17,612 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/1b628084663d487184b624699570d90b to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/1b628084663d487184b624699570d90b 2024-11-07T14:20:17,613 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/e96727d46eda4145b6e36e8b1353ca30 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/e96727d46eda4145b6e36e8b1353ca30 2024-11-07T14:20:17,614 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/c5cb47f8f2014d0892eb29b3952b08de, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/f225411e1286496ba5df10e256ef24cc, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/12be5696c0b94e888774abfacdf907fb, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/81be3276ef3b41f5811e79a108d065b6, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/1a3ee5a1da224231a3345d5ce7497c21, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/0e3ec58fe31545fa81d82e8a84028e96, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/6af3b31e741e41a2903202fdf77efc13, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/0b41cfeb1a3943789b766950c884fb07, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/a4cd2a516ff4415282d2377236441dbc, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/6446094cb8964b0380518f3f26a89526, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/90e88d4e62364005816ddeea31c6a5c1, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/165a4435b9e04844a6c73b448c1bd30a, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/104df67ddb4f4277948cc25c93311812, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/581e67a0d5e64f7aa840b4e19c9e4eca, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/3908184bec384ce8aa40b88a5cb8c3c7, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/11be95cecf4a462c89c92896b81197fc, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/d193a308269045c2a9ad69e31082cd80, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/c1b17779ad484d328f5a72d780e3cdc3, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/56ff5d94fd584634bc20cb855fa6cd8a, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/80cb156e2d0a414b87e03cf4e90c878c, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/ceb63cb9709241c3a99117700271dbc9] to archive 2024-11-07T14:20:17,615 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-07T14:20:17,616 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/c5cb47f8f2014d0892eb29b3952b08de to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/c5cb47f8f2014d0892eb29b3952b08de 2024-11-07T14:20:17,616 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/f225411e1286496ba5df10e256ef24cc to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/f225411e1286496ba5df10e256ef24cc 2024-11-07T14:20:17,617 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/12be5696c0b94e888774abfacdf907fb to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/12be5696c0b94e888774abfacdf907fb 2024-11-07T14:20:17,618 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/81be3276ef3b41f5811e79a108d065b6 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/81be3276ef3b41f5811e79a108d065b6 2024-11-07T14:20:17,619 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/1a3ee5a1da224231a3345d5ce7497c21 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/1a3ee5a1da224231a3345d5ce7497c21 2024-11-07T14:20:17,619 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/0e3ec58fe31545fa81d82e8a84028e96 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/0e3ec58fe31545fa81d82e8a84028e96 2024-11-07T14:20:17,620 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/6af3b31e741e41a2903202fdf77efc13 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/6af3b31e741e41a2903202fdf77efc13 2024-11-07T14:20:17,621 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/0b41cfeb1a3943789b766950c884fb07 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/0b41cfeb1a3943789b766950c884fb07 2024-11-07T14:20:17,622 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/a4cd2a516ff4415282d2377236441dbc to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/a4cd2a516ff4415282d2377236441dbc 2024-11-07T14:20:17,623 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/6446094cb8964b0380518f3f26a89526 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/6446094cb8964b0380518f3f26a89526 2024-11-07T14:20:17,623 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/90e88d4e62364005816ddeea31c6a5c1 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/90e88d4e62364005816ddeea31c6a5c1 2024-11-07T14:20:17,624 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/165a4435b9e04844a6c73b448c1bd30a to 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/165a4435b9e04844a6c73b448c1bd30a 2024-11-07T14:20:17,625 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/104df67ddb4f4277948cc25c93311812 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/104df67ddb4f4277948cc25c93311812 2024-11-07T14:20:17,626 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/581e67a0d5e64f7aa840b4e19c9e4eca to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/581e67a0d5e64f7aa840b4e19c9e4eca 2024-11-07T14:20:17,627 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/3908184bec384ce8aa40b88a5cb8c3c7 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/3908184bec384ce8aa40b88a5cb8c3c7 2024-11-07T14:20:17,627 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/11be95cecf4a462c89c92896b81197fc to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/11be95cecf4a462c89c92896b81197fc 2024-11-07T14:20:17,628 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/d193a308269045c2a9ad69e31082cd80 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/d193a308269045c2a9ad69e31082cd80 2024-11-07T14:20:17,629 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/c1b17779ad484d328f5a72d780e3cdc3 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/c1b17779ad484d328f5a72d780e3cdc3 2024-11-07T14:20:17,630 DEBUG 
[StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/56ff5d94fd584634bc20cb855fa6cd8a to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/56ff5d94fd584634bc20cb855fa6cd8a 2024-11-07T14:20:17,631 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/80cb156e2d0a414b87e03cf4e90c878c to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/80cb156e2d0a414b87e03cf4e90c878c 2024-11-07T14:20:17,631 DEBUG [StoreCloser-TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/ceb63cb9709241c3a99117700271dbc9 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/ceb63cb9709241c3a99117700271dbc9 2024-11-07T14:20:17,635 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/recovered.edits/353.seqid, newMaxSeqId=353, maxSeqId=4 2024-11-07T14:20:17,635 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34. 
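The StoreCloser entries above show each compacted store file being moved from the region's data directory to a mirrored location under the cluster's archive directory, with the table/region/family/file suffix preserved. Below is a minimal Java sketch of that path mapping, assuming it is exactly the prefix swap visible in these entries (data/ → archive/data/); the class and helper names are hypothetical and this is not the HFileArchiver implementation itself.

```java
import org.apache.hadoop.fs.Path;

public final class ArchivePathExample {
    // Derive the archive location reported above by swapping the root's "data"
    // prefix for "archive/data" while keeping the rest of the store file path.
    static Path toArchivePath(Path rootDir, Path storeFile) {
        String dataPrefix = new Path(rootDir, "data").toString();
        String suffix = storeFile.toString().substring(dataPrefix.length() + 1);
        return new Path(new Path(rootDir, "archive/data"), suffix);
    }

    public static void main(String[] args) {
        Path root = new Path("hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8");
        Path src = new Path(root,
            "data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/c5cb47f8f2014d0892eb29b3952b08de");
        // Prints the matching .../archive/data/default/TestAcidGuarantees/... location,
        // mirroring the "Archived from FileableStoreFile, <data path> to <archive path>" lines above.
        System.out.println(toArchivePath(root, src));
    }
}
```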
2024-11-07T14:20:17,635 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1635): Region close journal for 13fd071fb15e0e486dd456286374cf34: 2024-11-07T14:20:17,636 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(170): Closed 13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:17,637 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=171 updating hbase:meta row=13fd071fb15e0e486dd456286374cf34, regionState=CLOSED 2024-11-07T14:20:17,638 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-11-07T14:20:17,638 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; CloseRegionProcedure 13fd071fb15e0e486dd456286374cf34, server=69430dbfd73f,45917,1730989044081 in 2.4090 sec 2024-11-07T14:20:17,639 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=171, resume processing ppid=170 2024-11-07T14:20:17,639 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, ppid=170, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=13fd071fb15e0e486dd456286374cf34, UNASSIGN in 2.4120 sec 2024-11-07T14:20:17,640 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-11-07T14:20:17,640 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.4150 sec 2024-11-07T14:20:17,641 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730989217641"}]},"ts":"1730989217641"} 2024-11-07T14:20:17,642 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-07T14:20:17,644 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-07T14:20:17,645 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.4240 sec 2024-11-07T14:20:19,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-07T14:20:19,326 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 169 completed 2024-11-07T14:20:19,326 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-07T14:20:19,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:20:19,328 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=173, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:20:19,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-07T14:20:19,328 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=173, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:20:19,330 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:19,332 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A, FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B, FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C, FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/recovered.edits] 2024-11-07T14:20:19,334 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/5b846b29039546b1b9d1e1d95556bf8f to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/5b846b29039546b1b9d1e1d95556bf8f 2024-11-07T14:20:19,335 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/e4c264e8c3b548f8aa691a6f018dbcdf to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/A/e4c264e8c3b548f8aa691a6f018dbcdf 2024-11-07T14:20:19,336 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/8180d989106844fcb1e8d853f5a96cce to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/8180d989106844fcb1e8d853f5a96cce 2024-11-07T14:20:19,337 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/f10dee2871d542719747d6665aee3691 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/B/f10dee2871d542719747d6665aee3691 2024-11-07T14:20:19,339 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/045ad4b53b4a4a1ca476d74afdc136b5 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/045ad4b53b4a4a1ca476d74afdc136b5 
2024-11-07T14:20:19,340 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/4041e8a151314dfbb9cab413d31d4b8e to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/4041e8a151314dfbb9cab413d31d4b8e 2024-11-07T14:20:19,340 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/7749e1da2d9a49ca91d45bbab419db98 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/7749e1da2d9a49ca91d45bbab419db98 2024-11-07T14:20:19,341 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/ed79296abede4ef9aa6c8e7f626324d8 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/C/ed79296abede4ef9aa6c8e7f626324d8 2024-11-07T14:20:19,343 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/recovered.edits/353.seqid to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34/recovered.edits/353.seqid 2024-11-07T14:20:19,343 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/default/TestAcidGuarantees/13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:19,343 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-07T14:20:19,344 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-07T14:20:19,344 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-07T14:20:19,346 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110740695ff689e744348c89807754b92ec0_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110740695ff689e744348c89807754b92ec0_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:19,347 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411074ab222462b374c1faed5a3c355747ec0_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411074ab222462b374c1faed5a3c355747ec0_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:19,348 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411074e4ba717f6d74e6ab9ef17c900d754ad_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411074e4ba717f6d74e6ab9ef17c900d754ad_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:19,349 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411075590e512454546ffa7e52b9f3940556d_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411075590e512454546ffa7e52b9f3940556d_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:19,350 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110761411d2e00024e528706c2208bef89a4_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110761411d2e00024e528706c2208bef89a4_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:19,351 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411076496d808808b4db79fdb54c7471641e3_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411076496d808808b4db79fdb54c7471641e3_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:19,352 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110764ba531cd4594dc08e8e02ff9265b8da_13fd071fb15e0e486dd456286374cf34 to 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024110764ba531cd4594dc08e8e02ff9265b8da_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:19,352 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107670236f5806b4d0d96cd2498741d4797_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107670236f5806b4d0d96cd2498741d4797_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:19,353 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411076d336df2d5a3453696cbb88eb0476611_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411076d336df2d5a3453696cbb88eb0476611_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:19,354 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411076f35f909a0ce47058dc616b1694cb924_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411076f35f909a0ce47058dc616b1694cb924_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:19,355 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411078ccef2e403a9447995c48576b42d0d51_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411078ccef2e403a9447995c48576b42d0d51_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:19,356 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411078e778a781ad44b1787de29bc89cbf55e_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411078e778a781ad44b1787de29bc89cbf55e_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:19,356 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107c13fb3aa812b448eb593e2b301649441_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107c13fb3aa812b448eb593e2b301649441_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:19,357 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107ce4c6e4e4be74c3bb171c3a6ed9071db_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107ce4c6e4e4be74c3bb171c3a6ed9071db_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:19,358 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107d045055044d14944a1a7a99d5e5a5467_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107d045055044d14944a1a7a99d5e5a5467_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:19,359 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107ec02db66049149efbc6b1d00d5c092a1_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107ec02db66049149efbc6b1d00d5c092a1_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:19,360 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107fb84cdb60a314b92817f1a23b8f44337_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107fb84cdb60a314b92817f1a23b8f44337_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:19,360 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107fdde8e3fa60a46d69b265962611145d1_13fd071fb15e0e486dd456286374cf34 to 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107fdde8e3fa60a46d69b265962611145d1_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:19,361 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107fe2bb2ac58cd4f39a00bb9df0eeef252_13fd071fb15e0e486dd456286374cf34 to hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241107fe2bb2ac58cd4f39a00bb9df0eeef252_13fd071fb15e0e486dd456286374cf34 2024-11-07T14:20:19,362 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-07T14:20:19,364 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=173, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:20:19,366 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-07T14:20:19,367 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-07T14:20:19,368 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=173, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:20:19,368 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-07T14:20:19,368 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1730989219368"}]},"ts":"9223372036854775807"} 2024-11-07T14:20:19,370 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-07T14:20:19,370 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 13fd071fb15e0e486dd456286374cf34, NAME => 'TestAcidGuarantees,,1730989190292.13fd071fb15e0e486dd456286374cf34.', STARTKEY => '', ENDKEY => ''}] 2024-11-07T14:20:19,370 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-11-07T14:20:19,370 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1730989219370"}]},"ts":"9223372036854775807"} 2024-11-07T14:20:19,371 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-07T14:20:19,373 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=173, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-07T14:20:19,374 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 47 msec 2024-11-07T14:20:19,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40909 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-07T14:20:19,429 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 173 completed 2024-11-07T14:20:19,439 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobGetAtomicity Thread=241 (was 238) - Thread LEAK? -, OpenFileDescriptor=459 (was 451) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=409 (was 411), ProcessCount=11 (was 11), AvailableMemoryMB=5523 (was 5653) 2024-11-07T14:20:19,439 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-11-07T14:20:19,439 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-07T14:20:19,439 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x76523d14 to 127.0.0.1:51818 2024-11-07T14:20:19,439 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:20:19,439 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-07T14:20:19,439 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=67080707, stopped=false 2024-11-07T14:20:19,440 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=69430dbfd73f,40909,1730989043291 2024-11-07T14:20:19,442 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-11-07T14:20:19,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45917-0x1018318208e0001, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T14:20:19,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T14:20:19,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T14:20:19,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45917-0x1018318208e0001, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T14:20:19,442 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
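The two client.HBaseAdmin$TableFuture lines above record the DISABLE (procId 169) and DELETE (procId 173) operations completing from the client's side. A minimal sketch of the Admin calls that drive such a sequence is shown below, assuming a standard client configuration; the connection setup and class name are illustrative and not taken from the TestAcidGuarantees source.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName tn = TableName.valueOf("TestAcidGuarantees");
            if (admin.tableExists(tn)) {
                admin.disableTable(tn); // runs a DisableTableProcedure, as logged above
                admin.deleteTable(tn);  // runs a DeleteTableProcedure, which archives the
                                        // region directories and cleans hbase:meta as logged above
            }
        }
    }
}
```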
2024-11-07T14:20:19,442 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '69430dbfd73f,45917,1730989044081' ***** 2024-11-07T14:20:19,442 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-11-07T14:20:19,442 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45917-0x1018318208e0001, quorum=127.0.0.1:51818, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T14:20:19,442 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T14:20:19,443 INFO [RS:0;69430dbfd73f:45917 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-07T14:20:19,443 INFO [RS:0;69430dbfd73f:45917 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-07T14:20:19,443 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-11-07T14:20:19,443 INFO [RS:0;69430dbfd73f:45917 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-07T14:20:19,443 INFO [RS:0;69430dbfd73f:45917 {}] regionserver.HRegionServer(3579): Received CLOSE for 88991fc0836214a8e1586689b73580c1 2024-11-07T14:20:19,444 INFO [RS:0;69430dbfd73f:45917 {}] regionserver.HRegionServer(1224): stopping server 69430dbfd73f,45917,1730989044081 2024-11-07T14:20:19,444 DEBUG [RS:0;69430dbfd73f:45917 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:20:19,444 INFO [RS:0;69430dbfd73f:45917 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-07T14:20:19,444 INFO [RS:0;69430dbfd73f:45917 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-07T14:20:19,444 INFO [RS:0;69430dbfd73f:45917 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-07T14:20:19,444 INFO [RS:0;69430dbfd73f:45917 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-11-07T14:20:19,444 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 88991fc0836214a8e1586689b73580c1, disabling compactions & flushes 2024-11-07T14:20:19,444 INFO [RS:0;69430dbfd73f:45917 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-11-07T14:20:19,444 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1730989047669.88991fc0836214a8e1586689b73580c1. 2024-11-07T14:20:19,444 DEBUG [RS:0;69430dbfd73f:45917 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, 88991fc0836214a8e1586689b73580c1=hbase:namespace,,1730989047669.88991fc0836214a8e1586689b73580c1.} 2024-11-07T14:20:19,444 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1730989047669.88991fc0836214a8e1586689b73580c1. 2024-11-07T14:20:19,444 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1730989047669.88991fc0836214a8e1586689b73580c1. 
after waiting 0 ms 2024-11-07T14:20:19,444 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1730989047669.88991fc0836214a8e1586689b73580c1. 2024-11-07T14:20:19,444 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 88991fc0836214a8e1586689b73580c1 1/1 column families, dataSize=78 B heapSize=488 B 2024-11-07T14:20:19,444 DEBUG [RS_CLOSE_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-07T14:20:19,444 INFO [RS_CLOSE_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-07T14:20:19,444 DEBUG [RS_CLOSE_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-07T14:20:19,444 DEBUG [RS_CLOSE_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-07T14:20:19,444 DEBUG [RS_CLOSE_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-07T14:20:19,445 INFO [RS_CLOSE_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-11-07T14:20:19,445 DEBUG [RS:0;69430dbfd73f:45917 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 88991fc0836214a8e1586689b73580c1 2024-11-07T14:20:19,468 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/hbase/namespace/88991fc0836214a8e1586689b73580c1/.tmp/info/0554d7d93d244a6b96468d011097d9d8 is 45, key is default/info:d/1730989049021/Put/seqid=0 2024-11-07T14:20:19,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742458_1634 (size=5037) 2024-11-07T14:20:19,472 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/hbase/namespace/88991fc0836214a8e1586689b73580c1/.tmp/info/0554d7d93d244a6b96468d011097d9d8 2024-11-07T14:20:19,476 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/hbase/namespace/88991fc0836214a8e1586689b73580c1/.tmp/info/0554d7d93d244a6b96468d011097d9d8 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/hbase/namespace/88991fc0836214a8e1586689b73580c1/info/0554d7d93d244a6b96468d011097d9d8 2024-11-07T14:20:19,476 DEBUG [RS_CLOSE_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/hbase/meta/1588230740/.tmp/info/584beb6c47f544218a47c7cd37cc612b is 143, key is hbase:namespace,,1730989047669.88991fc0836214a8e1586689b73580c1./info:regioninfo/1730989048930/Put/seqid=0 2024-11-07T14:20:19,478 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/hbase/namespace/88991fc0836214a8e1586689b73580c1/info/0554d7d93d244a6b96468d011097d9d8, entries=2, sequenceid=6, filesize=4.9 K 2024-11-07T14:20:19,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742459_1635 (size=7725) 2024-11-07T14:20:19,479 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 88991fc0836214a8e1586689b73580c1 in 35ms, sequenceid=6, compaction requested=false 2024-11-07T14:20:19,480 INFO [RS_CLOSE_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/hbase/meta/1588230740/.tmp/info/584beb6c47f544218a47c7cd37cc612b 2024-11-07T14:20:19,483 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/hbase/namespace/88991fc0836214a8e1586689b73580c1/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-07T14:20:19,483 INFO [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1730989047669.88991fc0836214a8e1586689b73580c1. 2024-11-07T14:20:19,483 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 88991fc0836214a8e1586689b73580c1: 2024-11-07T14:20:19,483 DEBUG [RS_CLOSE_REGION-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1730989047669.88991fc0836214a8e1586689b73580c1. 
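In the namespace-region close above, the flushed HFile is first written under the store's .tmp directory and then "committed" into the live info directory. At the filesystem level that commit step is a rename; the sketch below illustrates it under that assumption (the FileSystem handle and paths are illustrative, and this is not the HStore/HRegionFileSystem code itself).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FlushCommitExample {
    public static void main(String[] args) throws Exception {
        // Assumed handle to the same filesystem the region writes to.
        FileSystem fs = FileSystem.get(new Configuration());
        Path storeDir = new Path("/hbase/data/hbase/namespace/88991fc0836214a8e1586689b73580c1");
        // The flush output lands under .tmp first...
        Path flushed = new Path(storeDir, ".tmp/info/0554d7d93d244a6b96468d011097d9d8");
        // ...and the commit makes it visible by renaming it into the store directory.
        Path live = new Path(storeDir, "info/0554d7d93d244a6b96468d011097d9d8");
        boolean committed = fs.rename(flushed, live); // a single metadata operation on HDFS
        System.out.println("committed=" + committed);
    }
}
```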
2024-11-07T14:20:19,489 INFO [regionserver/69430dbfd73f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T14:20:19,497 DEBUG [RS_CLOSE_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/hbase/meta/1588230740/.tmp/rep_barrier/08bf76dc9bd4432d90854c8da9171163 is 102, key is TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e./rep_barrier:/1730989074180/DeleteFamily/seqid=0 2024-11-07T14:20:19,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742460_1636 (size=6025) 2024-11-07T14:20:19,645 DEBUG [RS:0;69430dbfd73f:45917 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-07T14:20:19,845 DEBUG [RS:0;69430dbfd73f:45917 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-07T14:20:19,901 INFO [RS_CLOSE_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/hbase/meta/1588230740/.tmp/rep_barrier/08bf76dc9bd4432d90854c8da9171163 2024-11-07T14:20:19,920 DEBUG [RS_CLOSE_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/hbase/meta/1588230740/.tmp/table/78aaf5eeeabe4e4baf7f3f4af0ee683a is 96, key is TestAcidGuarantees,,1730989049216.5a16e97064ea2ba83f416db90324fc7e./table:/1730989074180/DeleteFamily/seqid=0 2024-11-07T14:20:19,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742461_1637 (size=5942) 2024-11-07T14:20:20,046 DEBUG [RS:0;69430dbfd73f:45917 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-07T14:20:20,246 DEBUG [RS:0;69430dbfd73f:45917 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-07T14:20:20,299 ERROR [LeaseRenewer:jenkins@localhost:34807 {}] server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins@localhost:34807,5,PEWorkerGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null at org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T14:20:20,324 INFO [RS_CLOSE_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/hbase/meta/1588230740/.tmp/table/78aaf5eeeabe4e4baf7f3f4af0ee683a 2024-11-07T14:20:20,328 DEBUG [RS_CLOSE_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/hbase/meta/1588230740/.tmp/info/584beb6c47f544218a47c7cd37cc612b as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/hbase/meta/1588230740/info/584beb6c47f544218a47c7cd37cc612b 2024-11-07T14:20:20,331 INFO [RS_CLOSE_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/hbase/meta/1588230740/info/584beb6c47f544218a47c7cd37cc612b, entries=22, sequenceid=93, filesize=7.5 K 2024-11-07T14:20:20,331 DEBUG [RS_CLOSE_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/hbase/meta/1588230740/.tmp/rep_barrier/08bf76dc9bd4432d90854c8da9171163 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/hbase/meta/1588230740/rep_barrier/08bf76dc9bd4432d90854c8da9171163 2024-11-07T14:20:20,334 INFO [RS_CLOSE_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/hbase/meta/1588230740/rep_barrier/08bf76dc9bd4432d90854c8da9171163, entries=6, sequenceid=93, filesize=5.9 K 2024-11-07T14:20:20,335 DEBUG [RS_CLOSE_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/hbase/meta/1588230740/.tmp/table/78aaf5eeeabe4e4baf7f3f4af0ee683a as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/hbase/meta/1588230740/table/78aaf5eeeabe4e4baf7f3f4af0ee683a 2024-11-07T14:20:20,338 INFO [RS_CLOSE_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/hbase/meta/1588230740/table/78aaf5eeeabe4e4baf7f3f4af0ee683a, entries=9, sequenceid=93, filesize=5.8 K 2024-11-07T14:20:20,338 INFO [RS_CLOSE_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 894ms, sequenceid=93, compaction requested=false 2024-11-07T14:20:20,342 DEBUG [RS_CLOSE_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-11-07T14:20:20,342 DEBUG [RS_CLOSE_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor 
org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-07T14:20:20,342 INFO [RS_CLOSE_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-07T14:20:20,342 DEBUG [RS_CLOSE_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-07T14:20:20,342 DEBUG [RS_CLOSE_META-regionserver/69430dbfd73f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-07T14:20:20,357 INFO [regionserver/69430dbfd73f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-07T14:20:20,357 INFO [regionserver/69430dbfd73f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-07T14:20:20,446 INFO [RS:0;69430dbfd73f:45917 {}] regionserver.HRegionServer(1250): stopping server 69430dbfd73f,45917,1730989044081; all regions closed. 2024-11-07T14:20:20,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741834_1010 (size=26050) 2024-11-07T14:20:20,453 DEBUG [RS:0;69430dbfd73f:45917 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/oldWALs 2024-11-07T14:20:20,453 INFO [RS:0;69430dbfd73f:45917 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 69430dbfd73f%2C45917%2C1730989044081.meta:.meta(num 1730989047415) 2024-11-07T14:20:20,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741832_1008 (size=14209773) 2024-11-07T14:20:20,456 DEBUG [RS:0;69430dbfd73f:45917 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/oldWALs 2024-11-07T14:20:20,456 INFO [RS:0;69430dbfd73f:45917 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 69430dbfd73f%2C45917%2C1730989044081:(num 1730989046463) 2024-11-07T14:20:20,456 DEBUG [RS:0;69430dbfd73f:45917 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T14:20:20,456 INFO [RS:0;69430dbfd73f:45917 {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T14:20:20,457 INFO [RS:0;69430dbfd73f:45917 {}] hbase.ChoreService(370): Chore service for: regionserver/69430dbfd73f:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-07T14:20:20,457 INFO [regionserver/69430dbfd73f:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-11-07T14:20:20,457 INFO [RS:0;69430dbfd73f:45917 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:45917
2024-11-07T14:20:20,462 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45917-0x1018318208e0001, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/69430dbfd73f,45917,1730989044081
2024-11-07T14:20:20,462 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-07T14:20:20,464 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [69430dbfd73f,45917,1730989044081]
2024-11-07T14:20:20,465 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 69430dbfd73f,45917,1730989044081; numProcessing=1
2024-11-07T14:20:20,466 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/69430dbfd73f,45917,1730989044081 already deleted, retry=false
2024-11-07T14:20:20,466 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 69430dbfd73f,45917,1730989044081 expired; onlineServers=0
2024-11-07T14:20:20,466 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '69430dbfd73f,40909,1730989043291' *****
2024-11-07T14:20:20,466 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0
2024-11-07T14:20:20,467 DEBUG [M:0;69430dbfd73f:40909 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@238f856c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=69430dbfd73f/172.17.0.2:0
2024-11-07T14:20:20,467 INFO [M:0;69430dbfd73f:40909 {}] regionserver.HRegionServer(1224): stopping server 69430dbfd73f,40909,1730989043291
2024-11-07T14:20:20,467 INFO [M:0;69430dbfd73f:40909 {}] regionserver.HRegionServer(1250): stopping server 69430dbfd73f,40909,1730989043291; all regions closed.
2024-11-07T14:20:20,467 DEBUG [M:0;69430dbfd73f:40909 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-07T14:20:20,467 DEBUG [M:0;69430dbfd73f:40909 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-11-07T14:20:20,467 DEBUG [M:0;69430dbfd73f:40909 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-07T14:20:20,467 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-11-07T14:20:20,467 DEBUG [master/69430dbfd73f:0:becomeActiveMaster-HFileCleaner.large.0-1730989046175 {}] cleaner.HFileCleaner(306): Exit Thread[master/69430dbfd73f:0:becomeActiveMaster-HFileCleaner.large.0-1730989046175,5,FailOnTimeoutGroup]
2024-11-07T14:20:20,467 DEBUG [master/69430dbfd73f:0:becomeActiveMaster-HFileCleaner.small.0-1730989046178 {}] cleaner.HFileCleaner(306): Exit Thread[master/69430dbfd73f:0:becomeActiveMaster-HFileCleaner.small.0-1730989046178,5,FailOnTimeoutGroup]
2024-11-07T14:20:20,467 INFO [M:0;69430dbfd73f:40909 {}] hbase.ChoreService(370): Chore service for: master/69430dbfd73f:0 had [] on shutdown
2024-11-07T14:20:20,467 DEBUG [M:0;69430dbfd73f:40909 {}] master.HMaster(1733): Stopping service threads
2024-11-07T14:20:20,467 INFO [M:0;69430dbfd73f:40909 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-11-07T14:20:20,467 ERROR [M:0;69430dbfd73f:40909 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[IPC Client (59733779) connection to localhost/127.0.0.1:34807 from jenkins,5,PEWorkerGroup] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:34807,5,PEWorkerGroup] Thread[HFileArchiver-6,5,PEWorkerGroup]
2024-11-07T14:20:20,468 INFO [M:0;69430dbfd73f:40909 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-11-07T14:20:20,468 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-11-07T14:20:20,469 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-07T14:20:20,469 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T14:20:20,469 DEBUG [M:0;69430dbfd73f:40909 {}] zookeeper.ZKUtil(347): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-11-07T14:20:20,469 WARN [M:0;69430dbfd73f:40909 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-11-07T14:20:20,469 INFO [M:0;69430dbfd73f:40909 {}] assignment.AssignmentManager(391): Stopping assignment manager
2024-11-07T14:20:20,469 INFO [M:0;69430dbfd73f:40909 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-11-07T14:20:20,469 DEBUG [M:0;69430dbfd73f:40909 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-07T14:20:20,469 INFO [M:0;69430dbfd73f:40909 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-07T14:20:20,469 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-07T14:20:20,469 DEBUG [M:0;69430dbfd73f:40909 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-07T14:20:20,469 DEBUG [M:0;69430dbfd73f:40909 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-07T14:20:20,469 DEBUG [M:0;69430dbfd73f:40909 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-07T14:20:20,469 INFO [M:0;69430dbfd73f:40909 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=764.71 KB heapSize=940.09 KB
2024-11-07T14:20:20,486 DEBUG [M:0;69430dbfd73f:40909 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d0d3a601da9e4e4c84d159d2bfe2f6ee is 82, key is hbase:meta,,1/info:regioninfo/1730989047559/Put/seqid=0
2024-11-07T14:20:20,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742462_1638 (size=5672)
2024-11-07T14:20:20,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45917-0x1018318208e0001, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-07T14:20:20,565 INFO [RS:0;69430dbfd73f:45917 {}] regionserver.HRegionServer(1307): Exiting; stopping=69430dbfd73f,45917,1730989044081; zookeeper connection closed.
2024-11-07T14:20:20,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45917-0x1018318208e0001, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-07T14:20:20,565 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2ee5f91c {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2ee5f91c
2024-11-07T14:20:20,565 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-11-07T14:20:20,890 INFO [M:0;69430dbfd73f:40909 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2163 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d0d3a601da9e4e4c84d159d2bfe2f6ee
2024-11-07T14:20:20,913 DEBUG [M:0;69430dbfd73f:40909 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a2e5fd4d65104faa8398400f7cb1db91 is 2278, key is \x00\x00\x00\x00\x00\x00\x00\x94/proc:d/1730989193306/Put/seqid=0
2024-11-07T14:20:20,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742463_1639 (size=43979)
2024-11-07T14:20:21,318 INFO [M:0;69430dbfd73f:40909 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=764.15 KB at sequenceid=2163 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a2e5fd4d65104faa8398400f7cb1db91
2024-11-07T14:20:21,321 INFO [M:0;69430dbfd73f:40909 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a2e5fd4d65104faa8398400f7cb1db91
2024-11-07T14:20:21,339 DEBUG [M:0;69430dbfd73f:40909 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e481375411f74afea93c385b34dd3892 is 69, key is 69430dbfd73f,45917,1730989044081/rs:state/1730989046218/Put/seqid=0
2024-11-07T14:20:21,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073742464_1640 (size=5156)
2024-11-07T14:20:21,744 INFO [M:0;69430dbfd73f:40909 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2163 (bloomFilter=true), to=hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e481375411f74afea93c385b34dd3892
2024-11-07T14:20:21,747 DEBUG [M:0;69430dbfd73f:40909 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d0d3a601da9e4e4c84d159d2bfe2f6ee as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d0d3a601da9e4e4c84d159d2bfe2f6ee
2024-11-07T14:20:21,750 INFO [M:0;69430dbfd73f:40909 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d0d3a601da9e4e4c84d159d2bfe2f6ee, entries=8, sequenceid=2163, filesize=5.5 K
2024-11-07T14:20:21,751 DEBUG [M:0;69430dbfd73f:40909 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a2e5fd4d65104faa8398400f7cb1db91 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a2e5fd4d65104faa8398400f7cb1db91
2024-11-07T14:20:21,753 INFO [M:0;69430dbfd73f:40909 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a2e5fd4d65104faa8398400f7cb1db91
2024-11-07T14:20:21,753 INFO [M:0;69430dbfd73f:40909 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a2e5fd4d65104faa8398400f7cb1db91, entries=173, sequenceid=2163, filesize=42.9 K
2024-11-07T14:20:21,754 DEBUG [M:0;69430dbfd73f:40909 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e481375411f74afea93c385b34dd3892 as hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e481375411f74afea93c385b34dd3892
2024-11-07T14:20:21,756 INFO [M:0;69430dbfd73f:40909 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34807/user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e481375411f74afea93c385b34dd3892, entries=1, sequenceid=2163, filesize=5.0 K
2024-11-07T14:20:21,757 INFO [M:0;69430dbfd73f:40909 {}] regionserver.HRegion(3040): Finished flush of dataSize ~764.71 KB/783061, heapSize ~939.79 KB/962344, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1288ms, sequenceid=2163, compaction requested=false
2024-11-07T14:20:21,758 INFO [M:0;69430dbfd73f:40909 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-07T14:20:21,758 DEBUG [M:0;69430dbfd73f:40909 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-11-07T14:20:21,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34705 is added to blk_1073741830_1006 (size=924871)
2024-11-07T14:20:21,760 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(725): complete file /user/jenkins/test-data/682293a0-ac11-c75e-6a84-31fac05703e8/MasterData/WALs/69430dbfd73f,40909,1730989043291/69430dbfd73f%2C40909%2C1730989043291.1730989045655 not finished, retry = 0
2024-11-07T14:20:21,861 INFO [M:0;69430dbfd73f:40909 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-11-07T14:20:21,861 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-11-07T14:20:21,861 INFO [M:0;69430dbfd73f:40909 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:40909
2024-11-07T14:20:21,864 DEBUG [M:0;69430dbfd73f:40909 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/69430dbfd73f,40909,1730989043291 already deleted, retry=false
2024-11-07T14:20:21,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-07T14:20:21,966 INFO [M:0;69430dbfd73f:40909 {}] regionserver.HRegionServer(1307): Exiting; stopping=69430dbfd73f,40909,1730989043291; zookeeper connection closed.
2024-11-07T14:20:21,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40909-0x1018318208e0000, quorum=127.0.0.1:51818, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-07T14:20:21,971 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f79ec76{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-07T14:20:21,973 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-07T14:20:21,973 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-07T14:20:21,974 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-07T14:20:21,974 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/hadoop.log.dir/,STOPPED}
2024-11-07T14:20:21,977 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-07T14:20:21,977 WARN [BP-551066518-172.17.0.2-1730989040460 heartbeating to localhost/127.0.0.1:34807 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-07T14:20:21,977 WARN [BP-551066518-172.17.0.2-1730989040460 heartbeating to localhost/127.0.0.1:34807 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-551066518-172.17.0.2-1730989040460 (Datanode Uuid 1bdab903-9c17-4603-9ac5-a031956640f6) service to localhost/127.0.0.1:34807
2024-11-07T14:20:21,977 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-07T14:20:21,979 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/cluster_f3a4e477-7868-e1b1-52ac-86c06ee32671/dfs/data/data1/current/BP-551066518-172.17.0.2-1730989040460 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-07T14:20:21,980 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/cluster_f3a4e477-7868-e1b1-52ac-86c06ee32671/dfs/data/data2/current/BP-551066518-172.17.0.2-1730989040460 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-07T14:20:21,980 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-07T14:20:21,987 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-07T14:20:21,988 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-07T14:20:21,988 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-07T14:20:21,988 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-07T14:20:21,988 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e95e207e-04b6-c997-a405-839c94583c6e/hadoop.log.dir/,STOPPED}
2024-11-07T14:20:22,005 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-11-07T14:20:22,133 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down